Revert scheduler update (#46659)

Created by Conrad Irwin

Reverts the new scheduler; it's destroyed our CI

Release Notes:

- N/A

Change summary

Cargo.lock                                                       |   5 
Cargo.toml                                                       |   1 
crates/agent/src/edit_agent/evals.rs                             |   5 
crates/agent_ui/src/completion_provider.rs                       |   4 
crates/agent_ui/src/inline_assistant.rs                          |  12 
crates/agent_ui/src/language_model_selector.rs                   |  32 
crates/agent_ui/src/profile_selector.rs                          |  10 
crates/buffer_diff/src/buffer_diff.rs                            |  76 
crates/client/src/test.rs                                        |   7 
crates/collab/src/db.rs                                          |   2 
crates/collab/src/tests/channel_buffer_tests.rs                  |   2 
crates/collab/src/tests/editor_tests.rs                          |  62 
crates/collab/src/tests/following_tests.rs                       |   3 
crates/collab/src/tests/integration_tests.rs                     |   3 
crates/collab/src/tests/random_project_collaboration_tests.rs    |   6 
crates/collab/src/tests/randomized_test_helpers.rs               |   4 
crates/collab_ui/src/collab_panel.rs                             |  19 
crates/collab_ui/src/collab_panel/channel_modal.rs               |   2 
crates/command_palette/src/command_palette.rs                    |   2 
crates/component_preview/src/component_preview_example.rs        |   2 
crates/editor/benches/display_map.rs                             |   6 
crates/editor/benches/editor_render.rs                           |   2 
crates/editor/src/display_map/wrap_map.rs                        |   4 
crates/editor/src/editor_tests.rs                                |  26 
crates/editor/src/indent_guides.rs                               |   2 
crates/editor/src/inlays/inlay_hints.rs                          |  68 
crates/editor/src/test/editor_lsp_test_context.rs                |   4 
crates/extension_host/benches/extension_compilation_benchmark.rs |  12 
crates/extension_host/src/extension_host.rs                      |  46 
crates/extension_host/src/extension_store_test.rs                | 161 
crates/fs/src/fake_git_repo.rs                                   |  20 
crates/fs/src/fs.rs                                              |   3 
crates/gpui/Cargo.toml                                           |   6 
crates/gpui/src/app.rs                                           |  34 
crates/gpui/src/app/test_context.rs                              |  12 
crates/gpui/src/app/visual_test_context.rs                       |   7 
crates/gpui/src/element.rs                                       |   9 
crates/gpui/src/executor.rs                                      | 819 +
crates/gpui/src/gpui.rs                                          |   2 
crates/gpui/src/platform.rs                                      |  70 
crates/gpui/src/platform/linux/dispatcher.rs                     | 106 
crates/gpui/src/platform/linux/headless/client.rs                |   9 
crates/gpui/src/platform/linux/platform.rs                       |   7 
crates/gpui/src/platform/linux/wayland/client.rs                 |  45 
crates/gpui/src/platform/linux/x11/client.rs                     |  39 
crates/gpui/src/platform/mac/dispatcher.rs                       | 151 
crates/gpui/src/platform/mac/platform.rs                         |   6 
crates/gpui/src/platform/test/dispatcher.rs                      | 316 
crates/gpui/src/platform/test/platform.rs                        |   6 
crates/gpui/src/platform/visual_test.rs                          |  14 
crates/gpui/src/platform/windows/dispatcher.rs                   |  73 
crates/gpui/src/platform/windows/platform.rs                     |   4 
crates/gpui/src/platform_scheduler.rs                            | 138 
crates/gpui/src/profiler.rs                                      |   1 
crates/gpui/src/queue.rs                                         |  20 
crates/gpui/src/test.rs                                          |   3 
crates/gpui/src/text_system/line_wrapper.rs                      |   3 
crates/gpui/src/window.rs                                        |  72 
crates/gpui_macros/src/test.rs                                   |   4 
crates/language/src/buffer.rs                                    |  33 
crates/language/src/buffer_tests.rs                              |  12 
crates/language/src/language_registry.rs                         |  22 
crates/language_models/src/provider/mistral.rs                   |  21 
crates/language_models/src/provider/open_ai.rs                   |   4 
crates/livekit_client/src/livekit_client/playback/source.rs      |  17 
crates/lsp/src/lsp.rs                                            |   2 
crates/miniprofiler_ui/src/miniprofiler_ui.rs                    |   2 
crates/multi_buffer/src/multi_buffer_tests.rs                    |   4 
crates/project/src/project_settings.rs                           |   4 
crates/project/src/project_tests.rs                              |  34 
crates/project_panel/src/project_panel.rs                        | 176 
crates/project_symbols/src/project_symbols.rs                    |   4 
crates/remote_server/src/unix.rs                                 |   4 
crates/repl/Cargo.toml                                           |   1 
crates/repl/src/repl.rs                                          |  32 
crates/scheduler/Cargo.toml                                      |   1 
crates/scheduler/src/executor.rs                                 | 270 
crates/scheduler/src/scheduler.rs                                | 105 
crates/scheduler/src/test_scheduler.rs                           | 290 
crates/scheduler/src/tests.rs                                    | 112 
crates/storybook/src/stories/picker.rs                           |   4 
crates/terminal/src/terminal.rs                                  |   7 
crates/workspace/src/workspace.rs                                |   3 
crates/worktree/src/worktree.rs                                  |  85 
crates/zed/src/main.rs                                           |   8 
crates/zed/src/reliability.rs                                    |   2 
crates/zed/src/visual_test_runner.rs                             |  22 
crates/zed/src/zed.rs                                            |  10 
88 files changed, 1,804 insertions(+), 2,076 deletions(-)

Detailed changes

Cargo.lock 🔗

@@ -7335,7 +7335,6 @@ dependencies = [
  "calloop",
  "calloop-wayland-source",
  "cbindgen",
- "chrono",
  "circular-buffer",
  "cocoa 0.26.0",
  "cocoa-foundation 0.2.0",
@@ -7352,6 +7351,7 @@ dependencies = [
  "env_logger 0.11.8",
  "etagere",
  "filedescriptor",
+ "flume",
  "foreign-types 0.5.0",
  "futures 0.3.31",
  "gpui_macros",
@@ -7384,7 +7384,6 @@ dependencies = [
  "refineable",
  "reqwest_client",
  "resvg",
- "scheduler",
  "schemars",
  "seahash",
  "semver",
@@ -13585,7 +13584,6 @@ dependencies = [
  "alacritty_terminal",
  "anyhow",
  "async-dispatcher",
- "async-task",
  "async-tungstenite",
  "base64 0.22.1",
  "client",
@@ -14368,7 +14366,6 @@ dependencies = [
  "async-task",
  "backtrace",
  "chrono",
- "flume",
  "futures 0.3.31",
  "parking_lot",
  "rand 0.9.2",

Cargo.toml 🔗

@@ -380,7 +380,6 @@ rodio = { git = "https://github.com/RustAudio/rodio", rev ="e2074c6c2acf07b57cf7
 rope = { path = "crates/rope" }
 rpc = { path = "crates/rpc" }
 rules_library = { path = "crates/rules_library" }
-scheduler = { path = "crates/scheduler" }
 search = { path = "crates/search" }
 session = { path = "crates/session" }
 settings = { path = "crates/settings" }

crates/agent/src/edit_agent/evals.rs 🔗

@@ -1337,10 +1337,9 @@ impl EvalAssertion {
 }
 
 fn run_eval(eval: EvalInput) -> eval_utils::EvalOutput<EditEvalMetadata> {
-    let dispatcher = gpui::TestDispatcher::new(rand::random());
+    let dispatcher = gpui::TestDispatcher::new(StdRng::from_os_rng());
     let mut cx = TestAppContext::build(dispatcher, None);
-    let foreground_executor = cx.foreground_executor().clone();
-    let result = foreground_executor.block_test(async {
+    let result = cx.executor().block_test(async {
         let test = EditAgentTest::new(&mut cx).await;
         test.eval(eval, &mut cx).await
     });

crates/agent_ui/src/completion_provider.rs 🔗

@@ -1558,7 +1558,7 @@ pub(crate) fn search_symbols(
             });
 
         const MAX_MATCHES: usize = 100;
-        let mut visible_matches = cx.foreground_executor().block_on(fuzzy::match_strings(
+        let mut visible_matches = cx.background_executor().block(fuzzy::match_strings(
             &visible_match_candidates,
             &query,
             false,
@@ -1567,7 +1567,7 @@ pub(crate) fn search_symbols(
             &cancellation_flag,
             cx.background_executor().clone(),
         ));
-        let mut external_matches = cx.foreground_executor().block_on(fuzzy::match_strings(
+        let mut external_matches = cx.background_executor().block(fuzzy::match_strings(
             &external_match_candidates,
             &query,
             false,

crates/agent_ui/src/inline_assistant.rs 🔗

@@ -2102,9 +2102,9 @@ pub mod test {
             cx.set_global(inline_assistant);
         });
 
-        let foreground_executor = cx.foreground_executor().clone();
-        let project =
-            foreground_executor.block_test(async { Project::test(fs.clone(), [], cx).await });
+        let project = cx
+            .executor()
+            .block_test(async { Project::test(fs.clone(), [], cx).await });
 
         // Create workspace with window
         let (workspace, cx) = cx.add_window_view(|window, cx| {
@@ -2162,7 +2162,8 @@ pub mod test {
 
         test(cx);
 
-        let assist_id = foreground_executor
+        let assist_id = cx
+            .executor()
             .block_test(async { completion_rx.next().await })
             .unwrap()
             .unwrap();
@@ -2205,6 +2206,7 @@ pub mod evals {
     use eval_utils::{EvalOutput, NoProcessor};
     use gpui::TestAppContext;
     use language_model::{LanguageModelRegistry, SelectedModel};
+    use rand::{SeedableRng as _, rngs::StdRng};
 
     use crate::inline_assistant::test::{InlineAssistantOutput, run_inline_assistant_test};
 
@@ -2306,7 +2308,7 @@ pub mod evals {
         let prompt = prompt.into();
 
         eval_utils::eval(iterations, expected_pass_ratio, NoProcessor, move || {
-            let dispatcher = gpui::TestDispatcher::new(rand::random());
+            let dispatcher = gpui::TestDispatcher::new(StdRng::from_os_rng());
             let mut cx = TestAppContext::build(dispatcher, None);
             cx.skip_drawing();
 

crates/agent_ui/src/language_model_selector.rs 🔗

@@ -4,8 +4,7 @@ use agent_settings::AgentSettings;
 use collections::{HashMap, HashSet, IndexMap};
 use fuzzy::{StringMatch, StringMatchCandidate, match_strings};
 use gpui::{
-    Action, AnyElement, App, BackgroundExecutor, DismissEvent, FocusHandle, ForegroundExecutor,
-    Subscription, Task,
+    Action, AnyElement, App, BackgroundExecutor, DismissEvent, FocusHandle, Subscription, Task,
 };
 use language_model::{
     AuthenticateError, ConfiguredModel, IconOrSvg, LanguageModel, LanguageModelId,
@@ -362,28 +361,22 @@ enum LanguageModelPickerEntry {
 
 struct ModelMatcher {
     models: Vec<ModelInfo>,
-    fg_executor: ForegroundExecutor,
     bg_executor: BackgroundExecutor,
     candidates: Vec<StringMatchCandidate>,
 }
 
 impl ModelMatcher {
-    fn new(
-        models: Vec<ModelInfo>,
-        fg_executor: ForegroundExecutor,
-        bg_executor: BackgroundExecutor,
-    ) -> ModelMatcher {
+    fn new(models: Vec<ModelInfo>, bg_executor: BackgroundExecutor) -> ModelMatcher {
         let candidates = Self::make_match_candidates(&models);
         Self {
             models,
-            fg_executor,
             bg_executor,
             candidates,
         }
     }
 
     pub fn fuzzy_search(&self, query: &str) -> Vec<ModelInfo> {
-        let mut matches = self.fg_executor.block_on(match_strings(
+        let mut matches = self.bg_executor.block(match_strings(
             &self.candidates,
             query,
             false,
@@ -479,7 +472,6 @@ impl PickerDelegate for LanguageModelPickerDelegate {
     ) -> Task<()> {
         let all_models = self.all_models.clone();
         let active_model = (self.get_active_model)(cx);
-        let fg_executor = cx.foreground_executor();
         let bg_executor = cx.background_executor();
 
         let language_model_registry = LanguageModelRegistry::global(cx);
@@ -511,10 +503,8 @@ impl PickerDelegate for LanguageModelPickerDelegate {
             .cloned()
             .collect::<Vec<_>>();
 
-        let matcher_rec =
-            ModelMatcher::new(recommended_models, fg_executor.clone(), bg_executor.clone());
-        let matcher_all =
-            ModelMatcher::new(available_models, fg_executor.clone(), bg_executor.clone());
+        let matcher_rec = ModelMatcher::new(recommended_models, bg_executor.clone());
+        let matcher_all = ModelMatcher::new(available_models, bg_executor.clone());
 
         let recommended = matcher_rec.exact_search(&query);
         let all = matcher_all.fuzzy_search(&query);
@@ -759,11 +749,7 @@ mod tests {
             ("ollama", "mistral"),
             ("ollama", "deepseek"),
         ]);
-        let matcher = ModelMatcher::new(
-            models,
-            cx.foreground_executor().clone(),
-            cx.background_executor.clone(),
-        );
+        let matcher = ModelMatcher::new(models, cx.background_executor.clone());
 
         // The order of models should be maintained, case doesn't matter
         let results = matcher.exact_search("GPT-4.1");
@@ -791,11 +777,7 @@ mod tests {
             ("ollama", "mistral"),
             ("ollama", "deepseek"),
         ]);
-        let matcher = ModelMatcher::new(
-            models,
-            cx.foreground_executor().clone(),
-            cx.background_executor.clone(),
-        );
+        let matcher = ModelMatcher::new(models, cx.background_executor.clone());
 
         // Results should preserve models order whenever possible.
         // In the case below, `zed/gpt-4.1` and `openai/gpt-4.1` have identical

crates/agent_ui/src/profile_selector.rs 🔗

@@ -6,7 +6,7 @@ use fs::Fs;
 use fuzzy::{StringMatch, StringMatchCandidate, match_strings};
 use gpui::{
     Action, AnyElement, App, BackgroundExecutor, Context, DismissEvent, Entity, FocusHandle,
-    Focusable, ForegroundExecutor, SharedString, Subscription, Task, Window,
+    Focusable, SharedString, Subscription, Task, Window,
 };
 use picker::{Picker, PickerDelegate, popover_menu::PickerPopoverMenu};
 use settings::{Settings as _, SettingsStore, update_settings_file};
@@ -103,7 +103,6 @@ impl ProfileSelector {
                 self.fs.clone(),
                 self.provider.clone(),
                 self.profiles.clone(),
-                cx.foreground_executor().clone(),
                 cx.background_executor().clone(),
                 self.focus_handle.clone(),
                 cx,
@@ -240,7 +239,6 @@ enum ProfilePickerEntry {
 pub(crate) struct ProfilePickerDelegate {
     fs: Arc<dyn Fs>,
     provider: Arc<dyn ProfileProvider>,
-    foreground: ForegroundExecutor,
     background: BackgroundExecutor,
     candidates: Vec<ProfileCandidate>,
     string_candidates: Arc<Vec<StringMatchCandidate>>,
@@ -257,7 +255,6 @@ impl ProfilePickerDelegate {
         fs: Arc<dyn Fs>,
         provider: Arc<dyn ProfileProvider>,
         profiles: AvailableProfiles,
-        foreground: ForegroundExecutor,
         background: BackgroundExecutor,
         focus_handle: FocusHandle,
         cx: &mut Context<ProfileSelector>,
@@ -269,7 +266,6 @@ impl ProfilePickerDelegate {
         let mut this = Self {
             fs,
             provider,
-            foreground,
             background,
             candidates,
             string_candidates,
@@ -405,7 +401,7 @@ impl ProfilePickerDelegate {
 
         let cancel_flag = AtomicBool::new(false);
 
-        self.foreground.block_on(match_strings(
+        self.background.block(match_strings(
             self.string_candidates.as_ref(),
             query,
             false,
@@ -738,7 +734,6 @@ mod tests {
             let delegate = ProfilePickerDelegate {
                 fs: FakeFs::new(cx.background_executor().clone()),
                 provider: Arc::new(TestProfileProvider::new(AgentProfileId("write".into()))),
-                foreground: cx.foreground_executor().clone(),
                 background: cx.background_executor().clone(),
                 candidates,
                 string_candidates: Arc::new(Vec::new()),
@@ -776,7 +771,6 @@ mod tests {
             let delegate = ProfilePickerDelegate {
                 fs: FakeFs::new(cx.background_executor().clone()),
                 provider: Arc::new(TestProfileProvider::new(AgentProfileId("write".into()))),
-                foreground: cx.foreground_executor().clone(),
                 background: cx.background_executor().clone(),
                 candidates,
                 string_candidates: Arc::new(Vec::new()),

crates/buffer_diff/src/buffer_diff.rs 🔗

@@ -1,16 +1,22 @@
 use futures::channel::oneshot;
 use git2::{DiffLineType as GitDiffLineType, DiffOptions as GitOptions, Patch as GitPatch};
-use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Task};
+use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Task, TaskLabel};
 use language::{
     BufferRow, Capability, DiffOptions, File, Language, LanguageName, LanguageRegistry,
     language_settings::language_settings, word_diff_ranges,
 };
 use rope::Rope;
-use std::{cmp::Ordering, future::Future, iter, ops::Range, sync::Arc};
+use std::{
+    cmp::Ordering,
+    iter,
+    ops::Range,
+    sync::{Arc, LazyLock},
+};
 use sum_tree::SumTree;
 use text::{Anchor, Bias, BufferId, OffsetRangeExt, Point, ToOffset as _, ToPoint as _};
 use util::ResultExt;
 
+pub static CALCULATE_DIFF_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
 pub const MAX_WORD_DIFF_LINE_COUNT: usize = 5;
 
 pub struct BufferDiff {
@@ -1132,9 +1138,10 @@ impl BufferDiff {
         cx: &mut Context<Self>,
     ) -> Self {
         let mut this = BufferDiff::new(&buffer, cx);
+        let executor = cx.background_executor().clone();
         let mut base_text = base_text.to_owned();
         text::LineEnding::normalize(&mut base_text);
-        let inner = cx.foreground_executor().block_on(this.update_diff(
+        let inner = executor.block(this.update_diff(
             buffer.clone(),
             Some(Arc::from(base_text)),
             true,
@@ -1247,36 +1254,37 @@ impl BufferDiff {
             cx,
         );
 
-        cx.background_executor().spawn(async move {
-            let base_text_rope = if let Some(base_text) = &base_text {
-                if base_text_changed {
-                    Rope::from(base_text.as_ref())
+        cx.background_executor()
+            .spawn_labeled(*CALCULATE_DIFF_TASK, async move {
+                let base_text_rope = if let Some(base_text) = &base_text {
+                    if base_text_changed {
+                        Rope::from(base_text.as_ref())
+                    } else {
+                        prev_base_text
+                    }
                 } else {
-                    prev_base_text
+                    Rope::new()
+                };
+                let base_text_exists = base_text.is_some();
+                let hunks = compute_hunks(
+                    base_text
+                        .clone()
+                        .map(|base_text| (base_text, base_text_rope.clone())),
+                    buffer.clone(),
+                    diff_options,
+                );
+                let base_text = base_text.unwrap_or_default();
+                let inner = BufferDiffInner {
+                    base_text,
+                    hunks,
+                    base_text_exists,
+                    pending_hunks: SumTree::new(&buffer),
+                };
+                BufferDiffUpdate {
+                    inner,
+                    base_text_changed,
                 }
-            } else {
-                Rope::new()
-            };
-            let base_text_exists = base_text.is_some();
-            let hunks = compute_hunks(
-                base_text
-                    .clone()
-                    .map(|base_text| (base_text, base_text_rope.clone())),
-                buffer.clone(),
-                diff_options,
-            );
-            let base_text = base_text.unwrap_or_default();
-            let inner = BufferDiffInner {
-                base_text,
-                hunks,
-                base_text_exists,
-                pending_hunks: SumTree::new(&buffer),
-            };
-            BufferDiffUpdate {
-                inner,
-                base_text_changed,
-            }
-        })
+            })
     }
 
     pub fn language_changed(
@@ -1495,10 +1503,10 @@ impl BufferDiff {
         let language = self.base_text(cx).language().cloned();
         let base_text = self.base_text_string(cx).map(|s| s.as_str().into());
         let fut = self.update_diff(buffer.clone(), base_text, false, language, cx);
-        let fg_executor = cx.foreground_executor().clone();
-        let snapshot = fg_executor.block_on(fut);
+        let executor = cx.background_executor().clone();
+        let snapshot = executor.block(fut);
         let fut = self.set_snapshot_with_secondary_inner(snapshot, buffer, None, false, cx);
-        let (changed_range, base_text_changed_range) = fg_executor.block_on(fut);
+        let (changed_range, base_text_changed_range) = executor.block(fut);
         cx.emit(BufferDiffEvent::DiffChanged {
             changed_range,
             base_text_changed_range,

crates/client/src/test.rs 🔗

@@ -3,7 +3,7 @@ use anyhow::{Context as _, Result, anyhow};
 use cloud_api_client::{AuthenticatedUser, GetAuthenticatedUserResponse, PlanInfo};
 use cloud_llm_client::{CurrentUsage, PlanV1, UsageData, UsageLimit};
 use futures::{StreamExt, stream::BoxStream};
-use gpui::{AppContext as _, Entity, TestAppContext};
+use gpui::{AppContext as _, BackgroundExecutor, Entity, TestAppContext};
 use http_client::{AsyncBody, Method, Request, http};
 use parking_lot::Mutex;
 use rpc::{ConnectionId, Peer, Receipt, TypedEnvelope, proto};
@@ -13,6 +13,7 @@ pub struct FakeServer {
     peer: Arc<Peer>,
     state: Arc<Mutex<FakeServerState>>,
     user_id: u64,
+    executor: BackgroundExecutor,
 }
 
 #[derive(Default)]
@@ -34,6 +35,7 @@ impl FakeServer {
             peer: Peer::new(0),
             state: Default::default(),
             user_id: client_user_id,
+            executor: cx.executor(),
         };
 
         client.http_client().as_fake().replace_handler({
@@ -179,6 +181,8 @@ impl FakeServer {
 
     #[allow(clippy::await_holding_lock)]
     pub async fn receive<M: proto::EnvelopedMessage>(&self) -> Result<TypedEnvelope<M>> {
+        self.executor.start_waiting();
+
         let message = self
             .state
             .lock()
@@ -188,6 +192,7 @@ impl FakeServer {
             .next()
             .await
             .context("other half hung up")?;
+        self.executor.finish_waiting();
         let type_name = message.payload_type_name();
         let message = message.into_any();
 

crates/collab/src/db.rs 🔗

@@ -250,6 +250,8 @@ impl Database {
     {
         #[cfg(test)]
         {
+            use rand::prelude::*;
+
             let test_options = self.test_options.as_ref().unwrap();
             test_options.executor.simulate_random_delay().await;
             let fail_probability = *test_options.query_failure_probability.lock();

crates/collab/src/tests/channel_buffer_tests.rs 🔗

@@ -254,6 +254,7 @@ async fn test_channel_notes_participant_indices(
     let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b);
 
     // Clients A and B open the same file.
+    executor.start_waiting();
     let editor_a = workspace_a
         .update_in(cx_a, |workspace, window, cx| {
             workspace.open_path(
@@ -268,6 +269,7 @@ async fn test_channel_notes_participant_indices(
         .unwrap()
         .downcast::<Editor>()
         .unwrap();
+    executor.start_waiting();
     let editor_b = workspace_b
         .update_in(cx_b, |workspace, window, cx| {
             workspace.open_path(

crates/collab/src/tests/editor_tests.rs 🔗

@@ -67,7 +67,7 @@ async fn test_host_disconnect(
     client_a
         .fs()
         .insert_tree(
-            path!("/a"),
+            "/a",
             json!({
                 "a.txt": "a-contents",
                 "b.txt": "b-contents",
@@ -76,7 +76,7 @@ async fn test_host_disconnect(
         .await;
 
     let active_call_a = cx_a.read(ActiveCall::global);
-    let (project_a, worktree_id) = client_a.build_local_project(path!("/a"), cx_a).await;
+    let (project_a, worktree_id) = client_a.build_local_project("/a", cx_a).await;
 
     let worktree_a = project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap());
     let project_id = active_call_a
@@ -153,7 +153,7 @@ async fn test_host_disconnect(
 
     // Allow client A to reconnect to the server.
     server.allow_connections();
-    cx_a.background_executor.advance_clock(RECONNECT_TIMEOUT);
+    cx_a.background_executor.advance_clock(RECEIVE_TIMEOUT);
 
     // Client B calls client A again after they reconnected.
     let active_call_b = cx_b.read(ActiveCall::global);
@@ -429,51 +429,6 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu
         assert!(!buffer.completion_triggers().is_empty())
     });
 
-    // Set up the completion request handlers BEFORE typing the trigger character.
-    // This is critical - the handlers must be in place when the request arrives,
-    // otherwise the requests will time out waiting for a response.
-    let mut first_completion_request = fake_language_server
-        .set_request_handler::<lsp::request::Completion, _, _>(|params, _| async move {
-            assert_eq!(
-                params.text_document_position.text_document.uri,
-                lsp::Uri::from_file_path(path!("/a/main.rs")).unwrap(),
-            );
-            assert_eq!(
-                params.text_document_position.position,
-                lsp::Position::new(0, 14),
-            );
-
-            Ok(Some(lsp::CompletionResponse::Array(vec![
-                lsp::CompletionItem {
-                    label: "first_method(…)".into(),
-                    detail: Some("fn(&mut self, B) -> C".into()),
-                    text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
-                        new_text: "first_method($1)".to_string(),
-                        range: lsp::Range::new(
-                            lsp::Position::new(0, 14),
-                            lsp::Position::new(0, 14),
-                        ),
-                    })),
-                    insert_text_format: Some(lsp::InsertTextFormat::SNIPPET),
-                    ..Default::default()
-                },
-                lsp::CompletionItem {
-                    label: "second_method(…)".into(),
-                    detail: Some("fn(&mut self, C) -> D<E>".into()),
-                    text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
-                        new_text: "second_method()".to_string(),
-                        range: lsp::Range::new(
-                            lsp::Position::new(0, 14),
-                            lsp::Position::new(0, 14),
-                        ),
-                    })),
-                    insert_text_format: Some(lsp::InsertTextFormat::SNIPPET),
-                    ..Default::default()
-                },
-            ])))
-        });
-    let mut second_completion_request = second_fake_language_server
-        .set_request_handler::<lsp::request::Completion, _, _>(|_, _| async move { Ok(None) });
     // Type a completion trigger character as the guest.
     editor_b.update_in(cx_b, |editor, window, cx| {
         editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
@@ -487,10 +442,6 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu
     cx_b.background_executor.run_until_parked();
     cx_a.background_executor.run_until_parked();
 
-    // Wait for the completion requests to be received by the fake language servers.
-    first_completion_request.next().await.unwrap();
-    second_completion_request.next().await.unwrap();
-
     // Open the buffer on the host.
     let buffer_a = project_a
         .update(cx_a, |p, cx| {
@@ -1422,7 +1373,6 @@ async fn test_language_server_statuses(cx_a: &mut TestAppContext, cx_b: &mut Tes
         .unwrap();
 
     let fake_language_server = fake_language_servers.next().await.unwrap();
-    executor.run_until_parked();
     fake_language_server.start_progress("the-token").await;
 
     executor.advance_clock(SERVER_PROGRESS_THROTTLE_TIMEOUT);
@@ -1892,6 +1842,7 @@ async fn test_on_input_format_from_guest_to_host(
 
     // Receive an OnTypeFormatting request as the host's language server.
     // Return some formatting from the host's language server.
+    executor.start_waiting();
     fake_language_server
         .set_request_handler::<lsp::request::OnTypeFormatting, _, _>(|params, _| async move {
             assert_eq!(
@@ -1911,6 +1862,7 @@ async fn test_on_input_format_from_guest_to_host(
         .next()
         .await
         .unwrap();
+    executor.finish_waiting();
 
     // Open the buffer on the host and see that the formatting worked
     let buffer_a = project_a
@@ -2286,6 +2238,8 @@ async fn test_inlay_hint_refresh_is_forwarded(
     let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a);
     let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b);
 
+    cx_a.background_executor.start_waiting();
+
     let editor_a = workspace_a
         .update_in(cx_a, |workspace, window, cx| {
             workspace.open_path((worktree_id, rel_path("main.rs")), None, true, window, cx)
@@ -2349,6 +2303,7 @@ async fn test_inlay_hint_refresh_is_forwarded(
         .next()
         .await
         .unwrap();
+    executor.finish_waiting();
 
     executor.run_until_parked();
     editor_a.update(cx_a, |editor, cx| {
@@ -2960,6 +2915,7 @@ async fn test_lsp_pull_diagnostics(
         .unwrap();
 
     let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a);
+    executor.start_waiting();
 
     // The host opens a rust file.
     let _buffer_a = project_a

crates/collab/src/tests/following_tests.rs 🔗

@@ -2051,9 +2051,6 @@ async fn test_following_to_channel_notes_without_a_shared_project(
         });
     });
 
-    // Ensure client A's edits are synced to the server before client B starts following.
-    deterministic.run_until_parked();
-
     // Client B follows client A.
     workspace_b
         .update_in(cx_b, |workspace, window, cx| {

crates/collab/src/tests/integration_tests.rs 🔗

@@ -4358,7 +4358,6 @@ async fn test_collaborating_with_lsp_progress_updates_and_diagnostics_ordering(
 
     // Simulate a language server reporting errors for a file.
     let fake_language_server = fake_language_servers.next().await.unwrap();
-    executor.run_until_parked();
     fake_language_server
         .request::<lsp::request::WorkDoneProgressCreate>(lsp::WorkDoneProgressCreateParams {
             token: lsp::NumberOrString::String("the-disk-based-token".to_string()),
@@ -4571,7 +4570,6 @@ async fn test_formatting_buffer(
         project.register_buffer_with_language_servers(&buffer_b, cx)
     });
     let fake_language_server = fake_language_servers.next().await.unwrap();
-    executor.run_until_parked();
     fake_language_server.set_request_handler::<lsp::request::Formatting, _, _>(|_, _| async move {
         Ok(Some(vec![
             lsp::TextEdit {
@@ -5632,7 +5630,6 @@ async fn test_project_symbols(
         .unwrap();
 
     let fake_language_server = fake_language_servers.next().await.unwrap();
-    executor.run_until_parked();
     fake_language_server.set_request_handler::<lsp::WorkspaceSymbolRequest, _, _>(
         |_, _| async move {
             Ok(Some(lsp::WorkspaceSymbolResponse::Flat(vec![

crates/collab/src/tests/random_project_collaboration_tests.rs 🔗

@@ -1110,8 +1110,7 @@ impl RandomizedTest for ProjectCollaborationTest {
                             let fs = fs.clone();
                             move |_, cx| {
                                 let background = cx.background_executor();
-                                let rng = background.rng();
-                                let mut rng = rng.lock();
+                                let mut rng = background.rng();
                                 let count = rng.random_range::<usize, _>(1..3);
                                 let files = fs.as_fake().files();
                                 let files = (0..count)
@@ -1137,8 +1136,7 @@ impl RandomizedTest for ProjectCollaborationTest {
                                 move |_, cx| {
                                     let mut highlights = Vec::new();
                                     let background = cx.background_executor();
-                                    let rng = background.rng();
-                                    let mut rng = rng.lock();
+                                    let mut rng = background.rng();
 
                                     let highlight_count = rng.random_range(1..=5);
                                     for _ in 0..highlight_count {

crates/collab/src/tests/randomized_test_helpers.rs 🔗

@@ -174,7 +174,9 @@ pub async fn run_randomized_test<T: RandomizedTest>(
     }
 
     drop(operation_channels);
+    executor.start_waiting();
     futures::future::join_all(client_tasks).await;
+    executor.finish_waiting();
 
     executor.run_until_parked();
     T::on_quiesce(&mut server, &mut clients).await;
@@ -522,8 +524,10 @@ impl<T: RandomizedTest> TestPlan<T> {
                 server.forbid_connections();
                 server.disconnect_client(removed_peer_id);
                 deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
+                deterministic.start_waiting();
                 log::info!("waiting for user {} to exit...", removed_user_id);
                 client_task.await;
+                deterministic.finish_waiting();
                 server.allow_connections();
 
                 for project in client.dev_server_projects().iter() {

crates/collab_ui/src/collab_panel.rs 🔗

@@ -488,7 +488,6 @@ impl CollabPanel {
         let channel_store = self.channel_store.read(cx);
         let user_store = self.user_store.read(cx);
         let query = self.filter_editor.read(cx).text(cx);
-        let fg_executor = cx.foreground_executor();
         let executor = cx.background_executor().clone();
 
         let prev_selected_entry = self.selection.and_then(|ix| self.entries.get(ix).cloned());
@@ -518,7 +517,7 @@ impl CollabPanel {
                     self.match_candidates.clear();
                     self.match_candidates
                         .push(StringMatchCandidate::new(0, &user.github_login));
-                    let matches = fg_executor.block_on(match_strings(
+                    let matches = executor.block(match_strings(
                         &self.match_candidates,
                         &query,
                         true,
@@ -562,7 +561,7 @@ impl CollabPanel {
                             &participant.user.github_login,
                         )
                     }));
-                let mut matches = fg_executor.block_on(match_strings(
+                let mut matches = executor.block(match_strings(
                     &self.match_candidates,
                     &query,
                     true,
@@ -614,7 +613,7 @@ impl CollabPanel {
                             StringMatchCandidate::new(id, &participant.github_login)
                         },
                     ));
-                let matches = fg_executor.block_on(match_strings(
+                let matches = executor.block(match_strings(
                     &self.match_candidates,
                     &query,
                     true,
@@ -649,7 +648,7 @@ impl CollabPanel {
                 .ordered_channels()
                 .map(|(_, chan)| chan)
                 .collect::<Vec<_>>();
-            let matches = fg_executor.block_on(match_strings(
+            let matches = executor.block(match_strings(
                 &self.match_candidates,
                 &query,
                 true,
@@ -751,7 +750,7 @@ impl CollabPanel {
                     .enumerate()
                     .map(|(ix, channel)| StringMatchCandidate::new(ix, &channel.name)),
             );
-            let matches = fg_executor.block_on(match_strings(
+            let matches = executor.block(match_strings(
                 &self.match_candidates,
                 &query,
                 true,
@@ -787,7 +786,7 @@ impl CollabPanel {
                     .enumerate()
                     .map(|(ix, user)| StringMatchCandidate::new(ix, &user.github_login)),
             );
-            let matches = fg_executor.block_on(match_strings(
+            let matches = executor.block(match_strings(
                 &self.match_candidates,
                 &query,
                 true,
@@ -812,7 +811,7 @@ impl CollabPanel {
                     .enumerate()
                     .map(|(ix, user)| StringMatchCandidate::new(ix, &user.github_login)),
             );
-            let matches = fg_executor.block_on(match_strings(
+            let matches = executor.block(match_strings(
                 &self.match_candidates,
                 &query,
                 true,
@@ -846,14 +845,14 @@ impl CollabPanel {
                     .map(|(ix, contact)| StringMatchCandidate::new(ix, &contact.user.github_login)),
             );
 
-            let matches = fg_executor.block_on(match_strings(
+            let matches = executor.block(match_strings(
                 &self.match_candidates,
                 &query,
                 true,
                 true,
                 usize::MAX,
                 &Default::default(),
-                executor,
+                executor.clone(),
             ));
 
             let (online_contacts, offline_contacts) = matches

crates/collab_ui/src/collab_panel/channel_modal.rs 🔗

@@ -297,7 +297,7 @@ impl PickerDelegate for ChannelModalDelegate {
                             StringMatchCandidate::new(id, &member.user.github_login)
                         }));
 
-                    let matches = cx.foreground_executor().block_on(match_strings(
+                    let matches = cx.background_executor().block(match_strings(
                         &self.match_candidates,
                         &query,
                         true,

crates/command_palette/src/command_palette.rs 🔗

@@ -526,7 +526,7 @@ impl PickerDelegate for CommandPaletteDelegate {
         };
 
         match cx
-            .foreground_executor()
+            .background_executor()
             .block_with_timeout(duration, rx.clone().recv())
         {
             Ok(Some((commands, matches, interceptor_result))) => {

crates/component_preview/src/component_preview_example.rs 🔗

@@ -53,7 +53,7 @@ pub fn run_component_preview() {
         let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
         let workspace_store = cx.new(|cx| WorkspaceStore::new(client.clone(), cx));
         let session_id = uuid::Uuid::new_v4().to_string();
-        let session = cx.foreground_executor().block_on(Session::new(session_id));
+        let session = cx.background_executor().block(Session::new(session_id));
         let session = cx.new(|cx| AppSession::new(session, cx));
         let node_runtime = NodeRuntime::unavailable();
 

crates/editor/benches/display_map.rs 🔗

@@ -9,7 +9,8 @@ use text::Bias;
 use util::RandomCharIter;
 
 fn to_tab_point_benchmark(c: &mut Criterion) {
-    let dispatcher = TestDispatcher::new(1);
+    let rng = StdRng::seed_from_u64(1);
+    let dispatcher = TestDispatcher::new(rng);
     let cx = gpui::TestAppContext::build(dispatcher, None);
 
     let create_tab_map = |length: usize| {
@@ -54,7 +55,8 @@ fn to_tab_point_benchmark(c: &mut Criterion) {
 }
 
 fn to_fold_point_benchmark(c: &mut Criterion) {
-    let dispatcher = TestDispatcher::new(1);
+    let rng = StdRng::seed_from_u64(1);
+    let dispatcher = TestDispatcher::new(rng);
     let cx = gpui::TestAppContext::build(dispatcher, None);
 
     let create_tab_map = |length: usize| {

crates/editor/benches/editor_render.rs 🔗

@@ -116,7 +116,7 @@ fn editor_render(bencher: &mut Bencher<'_>, cx: &TestAppContext) {
 }
 
 pub fn benches() {
-    let dispatcher = TestDispatcher::new(1);
+    let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(1));
     let cx = gpui::TestAppContext::build(dispatcher, None);
     cx.update(|cx| {
         let store = SettingsStore::test(cx);

crates/editor/src/display_map/wrap_map.rs 🔗

@@ -212,7 +212,7 @@ impl WrapMap {
             });
 
             match cx
-                .foreground_executor()
+                .background_executor()
                 .block_with_timeout(Duration::from_millis(5), task)
             {
                 Ok((snapshot, edits)) => {
@@ -292,7 +292,7 @@ impl WrapMap {
             });
 
             match cx
-                .foreground_executor()
+                .background_executor()
                 .block_with_timeout(Duration::from_millis(1), update_task)
             {
                 Ok((snapshot, output_edits)) => {

crates/editor/src/editor_tests.rs 🔗

@@ -11901,6 +11901,7 @@ async fn test_document_format_during_save(cx: &mut TestAppContext) {
     });
     assert!(cx.read(|cx| editor.is_dirty(cx)));
 
+    cx.executor().start_waiting();
     let fake_server = fake_servers.next().await.unwrap();
 
     {
@@ -11930,6 +11931,7 @@ async fn test_document_format_during_save(cx: &mut TestAppContext) {
                 )
             })
             .unwrap();
+        cx.executor().start_waiting();
         save.await;
 
         assert_eq!(
@@ -11970,6 +11972,7 @@ async fn test_document_format_during_save(cx: &mut TestAppContext) {
             })
             .unwrap();
         cx.executor().advance_clock(super::FORMAT_TIMEOUT);
+        cx.executor().start_waiting();
         save.await;
         assert_eq!(
             editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12015,6 +12018,7 @@ async fn test_document_format_during_save(cx: &mut TestAppContext) {
                 )
             })
             .unwrap();
+        cx.executor().start_waiting();
         save.await;
     }
 }
@@ -12081,6 +12085,7 @@ async fn test_redo_after_noop_format(cx: &mut TestAppContext) {
                 )
             })
             .unwrap();
+        cx.executor().start_waiting();
         save.await;
         assert!(!cx.read(|cx| editor.is_dirty(cx)));
     }
@@ -12249,6 +12254,7 @@ async fn test_multibuffer_format_during_save(cx: &mut TestAppContext) {
     });
     cx.executor().run_until_parked();
 
+    cx.executor().start_waiting();
     let save = multi_buffer_editor
         .update_in(cx, |editor, window, cx| {
             editor.save(
@@ -12510,6 +12516,7 @@ async fn setup_range_format_test(
         build_editor_with_project(project.clone(), buffer, window, cx)
     });
 
+    cx.executor().start_waiting();
     let fake_server = fake_servers.next().await.unwrap();
 
     (project, editor, cx, fake_server)
@@ -12551,6 +12558,7 @@ async fn test_range_format_on_save_success(cx: &mut TestAppContext) {
         })
         .next()
         .await;
+    cx.executor().start_waiting();
     save.await;
     assert_eq!(
         editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12593,6 +12601,7 @@ async fn test_range_format_on_save_timeout(cx: &mut TestAppContext) {
         })
         .unwrap();
     cx.executor().advance_clock(super::FORMAT_TIMEOUT);
+    cx.executor().start_waiting();
     save.await;
     assert_eq!(
         editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12624,6 +12633,7 @@ async fn test_range_format_not_called_for_clean_buffer(cx: &mut TestAppContext)
             panic!("Should not be invoked");
         })
         .next();
+    cx.executor().start_waiting();
     save.await;
     cx.run_until_parked();
 }
@@ -12730,6 +12740,7 @@ async fn test_document_format_manual_trigger(cx: &mut TestAppContext) {
         editor.set_text("one\ntwo\nthree\n", window, cx)
     });
 
+    cx.executor().start_waiting();
     let fake_server = fake_servers.next().await.unwrap();
 
     let format = editor
@@ -12757,6 +12768,7 @@ async fn test_document_format_manual_trigger(cx: &mut TestAppContext) {
         })
         .next()
         .await;
+    cx.executor().start_waiting();
     format.await;
     assert_eq!(
         editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12789,6 +12801,7 @@ async fn test_document_format_manual_trigger(cx: &mut TestAppContext) {
         })
         .unwrap();
     cx.executor().advance_clock(super::FORMAT_TIMEOUT);
+    cx.executor().start_waiting();
     format.await;
     assert_eq!(
         editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12843,6 +12856,8 @@ async fn test_multiple_formatters(cx: &mut TestAppContext) {
         build_editor_with_project(project.clone(), buffer, window, cx)
     });
 
+    cx.executor().start_waiting();
+
     let fake_server = fake_servers.next().await.unwrap();
     fake_server.set_request_handler::<lsp::request::Formatting, _, _>(
         move |_params, _| async move {
@@ -12944,6 +12959,7 @@ async fn test_multiple_formatters(cx: &mut TestAppContext) {
         }
     });
 
+    cx.executor().start_waiting();
     editor
         .update_in(cx, |editor, window, cx| {
             editor.perform_format(
@@ -13111,6 +13127,7 @@ async fn test_organize_imports_manual_trigger(cx: &mut TestAppContext) {
         )
     });
 
+    cx.executor().start_waiting();
     let fake_server = fake_servers.next().await.unwrap();
 
     let format = editor
@@ -13156,6 +13173,7 @@ async fn test_organize_imports_manual_trigger(cx: &mut TestAppContext) {
         })
         .next()
         .await;
+    cx.executor().start_waiting();
     format.await;
     assert_eq!(
         editor.update(cx, |editor, cx| editor.text(cx)),
@@ -13191,6 +13209,7 @@ async fn test_organize_imports_manual_trigger(cx: &mut TestAppContext) {
         })
         .unwrap();
     cx.executor().advance_clock(super::CODE_ACTION_TIMEOUT);
+    cx.executor().start_waiting();
     format.await;
     assert_eq!(
         editor.update(cx, |editor, cx| editor.text(cx)),
@@ -13243,7 +13262,9 @@ async fn test_concurrent_format_requests(cx: &mut TestAppContext) {
 
     // Wait for both format requests to complete
     cx.executor().advance_clock(Duration::from_millis(200));
+    cx.executor().start_waiting();
     format_1.await.unwrap();
+    cx.executor().start_waiting();
     format_2.await.unwrap();
 
     // The formatting edits only happens once.
@@ -14763,7 +14784,6 @@ async fn test_completion_in_multibuffer_with_replace_range(cx: &mut TestAppConte
     });
 
     let fake_server = fake_servers.next().await.unwrap();
-    cx.run_until_parked();
 
     editor.update_in(cx, |editor, window, cx| {
         editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
@@ -15133,7 +15153,6 @@ async fn test_completion_can_run_commands(cx: &mut TestAppContext) {
         .downcast::<Editor>()
         .unwrap();
     let _fake_server = fake_servers.next().await.unwrap();
-    cx.run_until_parked();
 
     editor.update_in(cx, |editor, window, cx| {
         cx.focus_self(window);
@@ -15869,7 +15888,6 @@ async fn test_multiline_completion(cx: &mut TestAppContext) {
         .downcast::<Editor>()
         .unwrap();
     let fake_server = fake_servers.next().await.unwrap();
-    cx.run_until_parked();
 
     let multiline_label = "StickyHeaderExcerpt {\n            excerpt,\n            next_excerpt_controls_present,\n            next_buffer_row,\n        }: StickyHeaderExcerpt<'_>,";
     let multiline_label_2 = "a\nb\nc\n";
@@ -18243,6 +18261,7 @@ async fn test_on_type_formatting_not_triggered(cx: &mut TestAppContext) {
         .downcast::<Editor>()
         .unwrap();
 
+    cx.executor().start_waiting();
     let fake_server = fake_servers.next().await.unwrap();
 
     fake_server.set_request_handler::<lsp::request::OnTypeFormatting, _, _>(
@@ -25440,7 +25459,6 @@ async fn test_html_linked_edits_on_completion(cx: &mut TestAppContext) {
         .unwrap();
 
     let fake_server = fake_servers.next().await.unwrap();
-    cx.run_until_parked();
     editor.update_in(cx, |editor, window, cx| {
         editor.set_text("<ad></ad>", window, cx);
         editor.change_selections(SelectionEffects::no_scroll(), window, cx, |selections| {

crates/editor/src/indent_guides.rs 🔗

@@ -106,7 +106,7 @@ impl Editor {
 
             // Try to resolve the indent in a short amount of time, otherwise move it to a background task.
             match cx
-                .foreground_executor()
+                .background_executor()
                 .block_with_timeout(Duration::from_micros(200), task)
             {
                 Ok(result) => state.active_indent_range = result,

crates/editor/src/inlays/inlay_hints.rs 🔗

@@ -292,7 +292,6 @@ impl Editor {
         };
 
         let mut visible_excerpts = self.visible_excerpts(true, cx);
-
         let mut invalidate_hints_for_buffers = HashSet::default();
         let ignore_previous_fetches = match reason {
             InlayHintRefreshReason::ModifiersChanged(_)
@@ -349,7 +348,6 @@ impl Editor {
         let mut buffers_to_query = HashMap::default();
         for (_, (buffer, buffer_version, visible_range)) in visible_excerpts {
             let buffer_id = buffer.read(cx).remote_id();
-
             if !self.registered_buffers.contains_key(&buffer_id) {
                 continue;
             }
@@ -3658,49 +3656,35 @@ let c = 3;"#
             })
             .await
             .unwrap();
+        let editor =
+            cx.add_window(|window, cx| Editor::for_buffer(buffer, Some(project), window, cx));
 
-        // Use a VisualTestContext and explicitly establish a viewport on the editor (the production
-        // trigger for `NewLinesShown` / inlay hint refresh) by setting visible line/column counts.
-        let (editor_entity, cx) =
-            cx.add_window_view(|window, cx| Editor::for_buffer(buffer, Some(project), window, cx));
-
-        editor_entity.update_in(cx, |editor, window, cx| {
-            // Establish a viewport. The exact values are not important for this test; we just need
-            // the editor to consider itself visible so the refresh pipeline runs.
-            editor.set_visible_line_count(50.0, window, cx);
-            editor.set_visible_column_count(120.0);
-
-            // Explicitly trigger a refresh now that the viewport exists.
-            editor.refresh_inlay_hints(InlayHintRefreshReason::NewLinesShown, cx);
-        });
         cx.executor().run_until_parked();
-
-        editor_entity.update_in(cx, |editor, window, cx| {
-            editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
-                s.select_ranges([Point::new(10, 0)..Point::new(10, 0)])
-            });
-        });
-        cx.executor().run_until_parked();
-
-        // Allow any async inlay hint request/response work to complete.
-        cx.executor().advance_clock(Duration::from_millis(100));
+        editor
+            .update(cx, |editor, window, cx| {
+                editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
+                    s.select_ranges([Point::new(10, 0)..Point::new(10, 0)])
+                })
+            })
+            .unwrap();
         cx.executor().run_until_parked();
-
-        editor_entity.update(cx, |editor, cx| {
-            let expected_hints = vec![
-                "move".to_string(),
-                "(".to_string(),
-                "&x".to_string(),
-                ") ".to_string(),
-                ") ".to_string(),
-            ];
-            assert_eq!(
-                expected_hints,
-                cached_hint_labels(editor, cx),
-                "Editor inlay hints should repeat server's order when placed at the same spot"
-            );
-            assert_eq!(expected_hints, visible_hint_labels(editor, cx));
-        });
+        editor
+            .update(cx, |editor, _window, cx| {
+                let expected_hints = vec![
+                    "move".to_string(),
+                    "(".to_string(),
+                    "&x".to_string(),
+                    ") ".to_string(),
+                    ") ".to_string(),
+                ];
+                assert_eq!(
+                    expected_hints,
+                    cached_hint_labels(editor, cx),
+                    "Editor inlay hints should repeat server's order when placed at the same spot"
+                );
+                assert_eq!(expected_hints, visible_hint_labels(editor, cx));
+            })
+            .unwrap();
     }
 
     #[gpui::test]

crates/editor/src/test/editor_lsp_test_context.rs 🔗

@@ -130,10 +130,6 @@ impl EditorLspTestContext {
         });
 
         let lsp = fake_servers.next().await.unwrap();
-
-        // Ensure the language server is fully registered with the buffer
-        cx.executor().run_until_parked();
-
         Self {
             cx: EditorTestContext {
                 cx,

crates/extension_host/benches/extension_compilation_benchmark.rs 🔗

@@ -11,7 +11,7 @@ use fs::{Fs, RealFs};
 use gpui::{TestAppContext, TestDispatcher};
 use http_client::{FakeHttpClient, Response};
 use node_runtime::NodeRuntime;
-
+use rand::{SeedableRng, rngs::StdRng};
 use reqwest_client::ReqwestClient;
 use serde_json::json;
 use settings::SettingsStore;
@@ -41,8 +41,8 @@ fn extension_benchmarks(c: &mut Criterion) {
             || wasm_bytes.clone(),
             |wasm_bytes| {
                 let _extension = cx
-                    .foreground_executor()
-                    .block_on(wasm_host.load_extension(wasm_bytes, &manifest, &cx.to_async()))
+                    .executor()
+                    .block(wasm_host.load_extension(wasm_bytes, &manifest, &cx.to_async()))
                     .unwrap();
             },
             BatchSize::SmallInput,
@@ -52,7 +52,7 @@ fn extension_benchmarks(c: &mut Criterion) {
 
 fn init() -> TestAppContext {
     const SEED: u64 = 9999;
-    let dispatcher = TestDispatcher::new(SEED);
+    let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(SEED));
     let cx = TestAppContext::build(dispatcher, None);
     cx.executor().allow_parking();
     cx.update(|cx| {
@@ -72,8 +72,8 @@ fn wasm_bytes(cx: &TestAppContext, manifest: &mut ExtensionManifest, fs: Arc<dyn
         .parent()
         .unwrap()
         .join("extensions/test-extension");
-    cx.foreground_executor()
-        .block_on(extension_builder.compile_extension(
+    cx.executor()
+        .block(extension_builder.compile_extension(
             &path,
             manifest,
             CompileExtensionOptions { release: true },

crates/extension_host/src/extension_host.rs 🔗

@@ -282,7 +282,7 @@ impl ExtensionStore {
         // list of the installed extensions and the resources that they provide.
         // This index is loaded synchronously on startup.
         let (index_content, index_metadata, extensions_metadata) =
-            cx.foreground_executor().block_on(async {
+            cx.background_executor().block(async {
                 futures::join!(
                     this.fs.load(&this.index_path),
                     this.fs.metadata(&this.index_path),
@@ -336,38 +336,6 @@ impl ExtensionStore {
 
                 let mut index_changed = false;
                 let mut debounce_timer = cx.background_spawn(futures::future::pending()).fuse();
-
-                // If a test enables parking, it typically intends to allow real I/O progress while
-                // still using the scheduler-backed fake clock for deterministic execution.
-                // In that mode, debounce timers must use real time so that reloads can complete
-                // without requiring explicit fake-clock advancement.
-                #[cfg(any(test, feature = "test-support"))]
-                let use_real_time_debounce = cx
-                    .background_executor()
-                    .dispatcher()
-                    .as_test()
-                    .map(|test_dispatcher| test_dispatcher.scheduler().parking_allowed())
-                    .unwrap_or(false);
-
-                #[cfg(not(any(test, feature = "test-support")))]
-                let use_real_time_debounce = false;
-
-                fn schedule_debounce(
-                    use_real_time_debounce: bool,
-                    cx: &mut gpui::AsyncApp,
-                ) -> futures::future::Fuse<gpui::Task<()>> {
-                    if use_real_time_debounce {
-                        cx.background_spawn(async move {
-                            gpui::Timer::after(RELOAD_DEBOUNCE_DURATION).await;
-                        })
-                        .fuse()
-                    } else {
-                        cx.background_executor()
-                            .timer(RELOAD_DEBOUNCE_DURATION)
-                            .fuse()
-                    }
-                }
-
                 loop {
                     select_biased! {
                         _ = debounce_timer => {
@@ -383,15 +351,21 @@ impl ExtensionStore {
                             Self::update_remote_clients(&this, cx).await?;
                         }
                         _ = connection_registered_rx.next() => {
-                            debounce_timer = schedule_debounce(use_real_time_debounce, cx);
+                            debounce_timer = cx
+                                .background_executor()
+                                .timer(RELOAD_DEBOUNCE_DURATION)
+                                .fuse();
                         }
                         extension_id = reload_rx.next() => {
                             let Some(extension_id) = extension_id else { break; };
-                            this.update(cx, |this, _cx| {
+                            this.update(cx, |this, _| {
                                 this.modified_extensions.extend(extension_id);
                             })?;
                             index_changed = true;
-                            debounce_timer = schedule_debounce(use_real_time_debounce, cx);
+                            debounce_timer = cx
+                                .background_executor()
+                                .timer(RELOAD_DEBOUNCE_DURATION)
+                                .fuse();
                         }
                     }
                 }

crates/extension_host/src/extension_store_test.rs

@@ -534,27 +534,10 @@ async fn test_extension_store(cx: &mut TestAppContext) {
 
 #[gpui::test]
 async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
+    log::info!("Initializing test");
     init_test(cx);
     cx.executor().allow_parking();
 
-    async fn await_or_timeout<T>(
-        what: &'static str,
-        seconds: u64,
-        future: impl std::future::Future<Output = T>,
-    ) -> T {
-        use futures::FutureExt as _;
-        use gpui::Timer;
-
-        let timeout = Timer::after(std::time::Duration::from_secs(seconds));
-
-        futures::select! {
-            output = future.fuse() => output,
-            _ = futures::FutureExt::fuse(timeout) => panic!(
-            "[test_extension_store_with_test_extension] timed out after {seconds}s while {what}"
-        )
-        }
-    }
-
     let root_dir = Path::new(env!("CARGO_MANIFEST_DIR"))
         .parent()
         .unwrap()
@@ -576,12 +559,9 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
     let extensions_dir = extensions_tree.path().canonicalize().unwrap();
     let project_dir = project_dir.path().canonicalize().unwrap();
 
-    let project = await_or_timeout(
-        "awaiting Project::test",
-        5,
-        Project::test(fs.clone(), [project_dir.as_path()], cx),
-    )
-    .await;
+    log::info!("Setting up test");
+
+    let project = Project::test(fs.clone(), [project_dir.as_path()], cx).await;
 
     let proxy = Arc::new(ExtensionHostProxy::new());
     let theme_registry = Arc::new(ThemeRegistry::new(Box::new(())));
@@ -699,6 +679,8 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
         )
     });
 
+    log::info!("Flushing events");
+
     // Ensure that debounces fire.
     let mut events = cx.events(&extension_store);
     let executor = cx.executor();
@@ -719,15 +701,12 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
         .detach();
     });
 
-    await_or_timeout(
-        "awaiting install_dev_extension",
-        60,
-        extension_store.update(cx, |store, cx| {
+    extension_store
+        .update(cx, |store, cx| {
             store.install_dev_extension(test_extension_dir.clone(), cx)
-        }),
-    )
-    .await
-    .unwrap();
+        })
+        .await
+        .unwrap();
 
     let mut fake_servers = language_registry.register_fake_lsp_server(
         LanguageServerName("gleam".into()),
@@ -737,23 +716,15 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
         },
         None,
     );
-    cx.executor().run_until_parked();
 
-    let (buffer, _handle) = await_or_timeout(
-        "awaiting open_local_buffer_with_lsp",
-        5,
-        project.update(cx, |project, cx| {
+    let (buffer, _handle) = project
+        .update(cx, |project, cx| {
             project.open_local_buffer_with_lsp(project_dir.join("test.gleam"), cx)
-        }),
-    )
-    .await
-    .unwrap();
-    cx.executor().run_until_parked();
-
-    let fake_server = await_or_timeout("awaiting first fake server spawn", 10, fake_servers.next())
+        })
         .await
         .unwrap();
 
+    let fake_server = fake_servers.next().await.unwrap();
     let work_dir = extensions_dir.join(format!("work/{test_extension_id}"));
     let expected_server_path = work_dir.join("gleam-v1.2.3/gleam");
     let expected_binary_contents = language_server_version.lock().binary_contents.clone();
@@ -767,30 +738,16 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
     assert_eq!(fake_server.binary.path, expected_server_path);
     assert_eq!(fake_server.binary.arguments, [OsString::from("lsp")]);
     assert_eq!(
-        await_or_timeout(
-            "awaiting fs.load(expected_server_path)",
-            5,
-            fs.load(&expected_server_path)
-        )
-        .await
-        .unwrap(),
+        fs.load(&expected_server_path).await.unwrap(),
         expected_binary_contents
     );
     assert_eq!(language_server_version.lock().http_request_count, 2);
     assert_eq!(
         [
-            await_or_timeout("awaiting status_updates #1", 5, status_updates.next())
-                .await
-                .unwrap(),
-            await_or_timeout("awaiting status_updates #2", 5, status_updates.next())
-                .await
-                .unwrap(),
-            await_or_timeout("awaiting status_updates #3", 5, status_updates.next())
-                .await
-                .unwrap(),
-            await_or_timeout("awaiting status_updates #4", 5, status_updates.next())
-                .await
-                .unwrap(),
+            status_updates.next().await.unwrap(),
+            status_updates.next().await.unwrap(),
+            status_updates.next().await.unwrap(),
+            status_updates.next().await.unwrap(),
         ],
         [
             (
@@ -839,30 +796,16 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
         ])))
     });
 
-    // `register_fake_lsp_server` can yield a server instance before the client has finished the LSP
-    // initialization handshake. Wait until we observe the client's `initialized` notification before
-    // issuing requests like completion.
-    await_or_timeout("awaiting LSP Initialized notification", 5, async {
-        fake_server
-            .clone()
-            .try_receive_notification::<lsp::notification::Initialized>()
-            .await;
-    })
-    .await;
-
-    let completion_labels = await_or_timeout(
-        "awaiting completions",
-        5,
-        project.update(cx, |project, cx| {
+    let completion_labels = project
+        .update(cx, |project, cx| {
             project.completions(&buffer, 0, DEFAULT_COMPLETION_CONTEXT, cx)
-        }),
-    )
-    .await
-    .unwrap()
-    .into_iter()
-    .flat_map(|response| response.completions)
-    .map(|c| c.label.text)
-    .collect::<Vec<_>>();
+        })
+        .await
+        .unwrap()
+        .into_iter()
+        .flat_map(|response| response.completions)
+        .map(|c| c.label.text)
+        .collect::<Vec<_>>();
     assert_eq!(
         completion_labels,
         [
@@ -886,68 +829,40 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
 
     // The extension has cached the binary path, and does not attempt
     // to reinstall it.
-    let fake_server = await_or_timeout("awaiting second fake server spawn", 5, fake_servers.next())
-        .await
-        .unwrap();
+    let fake_server = fake_servers.next().await.unwrap();
     assert_eq!(fake_server.binary.path, expected_server_path);
     assert_eq!(
-        await_or_timeout(
-            "awaiting fs.load(expected_server_path) after restart",
-            5,
-            fs.load(&expected_server_path)
-        )
-        .await
-        .unwrap(),
+        fs.load(&expected_server_path).await.unwrap(),
         expected_binary_contents
     );
     assert_eq!(language_server_version.lock().http_request_count, 0);
 
     // Reload the extension, clearing its cache.
     // Start a new instance of the language server.
-    await_or_timeout(
-        "awaiting extension_store.reload(test-extension)",
-        5,
-        extension_store.update(cx, |store, cx| {
+    extension_store
+        .update(cx, |store, cx| {
             store.reload(Some("test-extension".into()), cx)
-        }),
-    )
-    .await;
+        })
+        .await;
     cx.executor().run_until_parked();
     project.update(cx, |project, cx| {
         project.restart_language_servers_for_buffers(vec![buffer.clone()], HashSet::default(), cx)
     });
 
     // The extension re-fetches the latest version of the language server.
-    let fake_server = await_or_timeout("awaiting third fake server spawn", 5, fake_servers.next())
-        .await
-        .unwrap();
+    let fake_server = fake_servers.next().await.unwrap();
     let new_expected_server_path =
         extensions_dir.join(format!("work/{test_extension_id}/gleam-v2.0.0/gleam"));
     let expected_binary_contents = language_server_version.lock().binary_contents.clone();
     assert_eq!(fake_server.binary.path, new_expected_server_path);
     assert_eq!(fake_server.binary.arguments, [OsString::from("lsp")]);
     assert_eq!(
-        await_or_timeout(
-            "awaiting fs.load(new_expected_server_path)",
-            5,
-            fs.load(&new_expected_server_path)
-        )
-        .await
-        .unwrap(),
+        fs.load(&new_expected_server_path).await.unwrap(),
         expected_binary_contents
     );
 
     // The old language server directory has been cleaned up.
-    assert!(
-        await_or_timeout(
-            "awaiting fs.metadata(expected_server_path)",
-            5,
-            fs.metadata(&expected_server_path)
-        )
-        .await
-        .unwrap()
-        .is_none()
-    );
+    assert!(fs.metadata(&expected_server_path).await.unwrap().is_none());
 }
 
 fn init_test(cx: &mut TestAppContext) {

crates/fs/src/fake_git_repo.rs

@@ -14,15 +14,21 @@ use git::{
         UnmergedStatus,
     },
 };
-use gpui::{AsyncApp, BackgroundExecutor, SharedString, Task};
+use gpui::{AsyncApp, BackgroundExecutor, SharedString, Task, TaskLabel};
 use ignore::gitignore::GitignoreBuilder;
 use parking_lot::Mutex;
 use rope::Rope;
 use smol::future::FutureExt as _;
-use std::{path::PathBuf, sync::Arc};
+use std::{
+    path::PathBuf,
+    sync::{Arc, LazyLock},
+};
 use text::LineEnding;
 use util::{paths::PathStyle, rel_path::RelPath};
 
+pub static LOAD_INDEX_TEXT_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
+pub static LOAD_HEAD_TEXT_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
+
 #[derive(Clone)]
 pub struct FakeGitRepository {
     pub(crate) fs: Arc<FakeFs>,
@@ -98,7 +104,9 @@ impl GitRepository for FakeGitRepository {
                 .context("not present in index")
                 .cloned()
         });
-        self.executor.spawn(async move { fut.await.ok() }).boxed()
+        self.executor
+            .spawn_labeled(*LOAD_INDEX_TEXT_TASK, async move { fut.await.ok() })
+            .boxed()
     }
 
     fn load_committed_text(&self, path: RepoPath) -> BoxFuture<'_, Option<String>> {
@@ -109,7 +117,9 @@ impl GitRepository for FakeGitRepository {
                 .context("not present in HEAD")
                 .cloned()
         });
-        self.executor.spawn(async move { fut.await.ok() }).boxed()
+        self.executor
+            .spawn_labeled(*LOAD_HEAD_TEXT_TASK, async move { fut.await.ok() })
+            .boxed()
     }
 
     fn load_blob_content(&self, oid: git::Oid) -> BoxFuture<'_, Result<String>> {
@@ -655,7 +665,7 @@ impl GitRepository for FakeGitRepository {
         let repository_dir_path = self.repository_dir_path.parent().unwrap().to_path_buf();
         async move {
             executor.simulate_random_delay().await;
-            let oid = git::Oid::random(&mut *executor.rng().lock());
+            let oid = git::Oid::random(&mut executor.rng());
             let entry = fs.entry(&repository_dir_path)?;
             checkpoints.lock().insert(oid, entry);
             Ok(GitRepositoryCheckpoint { commit_sha: oid })

crates/fs/src/fs.rs

@@ -63,6 +63,9 @@ use smol::io::AsyncReadExt;
 #[cfg(any(test, feature = "test-support"))]
 use std::ffi::OsStr;
 
+#[cfg(any(test, feature = "test-support"))]
+pub use fake_git_repo::{LOAD_HEAD_TEXT_TASK, LOAD_INDEX_TEXT_TASK};
+
 pub trait Watcher: Send + Sync {
     fn add(&self, path: &Path) -> Result<()>;
     fn remove(&self, path: &Path) -> Result<()>;

crates/gpui/Cargo.toml

@@ -107,12 +107,10 @@ num_cpus = "1.13"
 parking = "2.0.0"
 parking_lot.workspace = true
 postage.workspace = true
-chrono.workspace = true
 profiling.workspace = true
 rand.workspace = true
 raw-window-handle = "0.6"
 refineable.workspace = true
-scheduler.workspace = true
 resvg = { version = "0.45.0", default-features = false, features = [
     "text",
     "system-fonts",
@@ -162,6 +160,7 @@ objc2-metal = { version = "0.3", optional = true }
 mach2.workspace = true
 #TODO: replace with "objc2"
 metal.workspace = true
+flume = "0.11"
 
 [target.'cfg(any(target_os = "linux", target_os = "freebsd", target_os = "macos"))'.dependencies]
 pathfinder_geometry = "0.5"
@@ -171,6 +170,7 @@ scap = { workspace = true, optional = true }
 
 [target.'cfg(any(target_os = "linux", target_os = "freebsd"))'.dependencies]
 # Always used
+flume = "0.11"
 oo7 = { version = "0.5.0", default-features = false, features = [
     "async-std",
     "native_crypto",
@@ -236,6 +236,7 @@ xim = { git = "https://github.com/zed-industries/xim-rs.git", rev = "16f35a2c881
 x11-clipboard = { version = "0.9.3", optional = true }
 
 [target.'cfg(target_os = "windows")'.dependencies]
+flume = "0.11"
 rand.workspace = true
 windows.workspace = true
 windows-core.workspace = true
@@ -251,7 +252,6 @@ lyon = { version = "1.0", features = ["extra"] }
 pretty_assertions.workspace = true
 rand.workspace = true
 reqwest_client = { workspace = true, features = ["test-support"] }
-scheduler = { workspace = true, features = ["test-support"] }
 unicode-segmentation.workspace = true
 util = { workspace = true, features = ["test-support"] }
 

crates/gpui/src/app.rs

@@ -36,11 +36,11 @@ pub use visual_test_context::*;
 #[cfg(any(feature = "inspector", debug_assertions))]
 use crate::InspectorElementRegistry;
 use crate::{
-    Action, ActionBuildError, ActionRegistry, Any, AnyView, AnyWindowHandle, AppContext, Arena,
-    Asset, AssetSource, BackgroundExecutor, Bounds, ClipboardItem, CursorStyle, DispatchPhase,
-    DisplayId, EventEmitter, FocusHandle, FocusMap, ForegroundExecutor, Global, KeyBinding,
-    KeyContext, Keymap, Keystroke, LayoutId, Menu, MenuItem, OwnedMenu, PathPromptOptions, Pixels,
-    Platform, PlatformDisplay, PlatformKeyboardLayout, PlatformKeyboardMapper, Point, Priority,
+    Action, ActionBuildError, ActionRegistry, Any, AnyView, AnyWindowHandle, AppContext, Asset,
+    AssetSource, BackgroundExecutor, Bounds, ClipboardItem, CursorStyle, DispatchPhase, DisplayId,
+    EventEmitter, FocusHandle, FocusMap, ForegroundExecutor, Global, KeyBinding, KeyContext,
+    Keymap, Keystroke, LayoutId, Menu, MenuItem, OwnedMenu, PathPromptOptions, Pixels, Platform,
+    PlatformDisplay, PlatformKeyboardLayout, PlatformKeyboardMapper, Point, Priority,
     PromptBuilder, PromptButton, PromptHandle, PromptLevel, Render, RenderImage,
     RenderablePromptHandle, Reservation, ScreenCaptureSource, SharedString, SubscriberSet,
     Subscription, SvgRenderer, Task, TextRenderingMode, TextSystem, Window, WindowAppearance,
@@ -138,8 +138,10 @@ impl Application {
         #[cfg(any(test, feature = "test-support"))]
         log::info!("GPUI was compiled in test mode");
 
+        let liveness = Arc::new(());
         Self(App::new_app(
-            current_platform(false),
+            current_platform(false, Arc::downgrade(&liveness)),
+            liveness,
             Arc::new(()),
             Arc::new(NullHttpClient),
         ))
@@ -149,8 +151,10 @@ impl Application {
     /// but makes it possible to run an application in an context like
     /// SSH, where GUI applications are not allowed.
     pub fn headless() -> Self {
+        let liveness = Arc::new(());
         Self(App::new_app(
-            current_platform(true),
+            current_platform(true, Arc::downgrade(&liveness)),
+            liveness,
             Arc::new(()),
             Arc::new(NullHttpClient),
         ))
@@ -584,6 +588,7 @@ impl GpuiMode {
 /// You need a reference to an `App` to access the state of a [Entity].
 pub struct App {
     pub(crate) this: Weak<AppCell>,
+    pub(crate) _liveness: Arc<()>,
     pub(crate) platform: Rc<dyn Platform>,
     pub(crate) mode: GpuiMode,
     text_system: Arc<TextSystem>,
@@ -639,15 +644,13 @@ pub struct App {
     pub(crate) text_rendering_mode: Rc<Cell<TextRenderingMode>>,
     quit_mode: QuitMode,
     quitting: bool,
-    /// Per-App element arena. This isolates element allocations between different
-    /// App instances (important for tests where multiple Apps run concurrently).
-    pub(crate) element_arena: RefCell<Arena>,
 }
 
 impl App {
     #[allow(clippy::new_ret_no_self)]
     pub(crate) fn new_app(
         platform: Rc<dyn Platform>,
+        liveness: Arc<()>,
         asset_source: Arc<dyn AssetSource>,
         http_client: Arc<dyn HttpClient>,
     ) -> Rc<AppCell> {
@@ -666,6 +669,7 @@ impl App {
         let app = Rc::new_cyclic(|this| AppCell {
             app: RefCell::new(App {
                 this: this.clone(),
+                _liveness: liveness,
                 platform: platform.clone(),
                 text_system,
                 text_rendering_mode: Rc::new(Cell::new(TextRenderingMode::default())),
@@ -719,7 +723,6 @@ impl App {
 
                 #[cfg(any(test, feature = "test-support", debug_assertions))]
                 name: None,
-                element_arena: RefCell::new(Arena::new(1024 * 1024)),
             }),
         });
 
@@ -766,7 +769,7 @@ impl App {
 
         let futures = futures::future::join_all(futures);
         if self
-            .foreground_executor
+            .background_executor
             .block_with_timeout(SHUTDOWN_TIMEOUT, futures)
             .is_err()
         {
@@ -2539,13 +2542,6 @@ impl<'a, T> Drop for GpuiBorrow<'a, T> {
     }
 }
 
-impl Drop for App {
-    fn drop(&mut self) {
-        self.foreground_executor.close();
-        self.background_executor.close();
-    }
-}
-
 #[cfg(test)]
 mod test {
     use std::{cell::RefCell, rc::Rc};

crates/gpui/src/app/test_context.rs

@@ -9,7 +9,7 @@ use crate::{
 };
 use anyhow::{anyhow, bail};
 use futures::{Stream, StreamExt, channel::oneshot};
-
+use rand::{SeedableRng, rngs::StdRng};
 use std::{
     cell::RefCell, future::Future, ops::Deref, path::PathBuf, rc::Rc, sync::Arc, time::Duration,
 };
@@ -116,14 +116,16 @@ impl TestAppContext {
     /// Creates a new `TestAppContext`. Usually you can rely on `#[gpui::test]` to do this for you.
     pub fn build(dispatcher: TestDispatcher, fn_name: Option<&'static str>) -> Self {
         let arc_dispatcher = Arc::new(dispatcher.clone());
+        let liveness = std::sync::Arc::new(());
         let background_executor = BackgroundExecutor::new(arc_dispatcher.clone());
-        let foreground_executor = ForegroundExecutor::new(arc_dispatcher);
+        let foreground_executor =
+            ForegroundExecutor::new(arc_dispatcher, Arc::downgrade(&liveness));
         let platform = TestPlatform::new(background_executor.clone(), foreground_executor.clone());
         let asset_source = Arc::new(());
         let http_client = http_client::FakeHttpClient::with_404_response();
         let text_system = Arc::new(TextSystem::new(platform.text_system()));
 
-        let app = App::new_app(platform.clone(), asset_source, http_client);
+        let app = App::new_app(platform.clone(), liveness, asset_source, http_client);
         app.borrow_mut().mode = GpuiMode::test();
 
         Self {
@@ -145,7 +147,7 @@ impl TestAppContext {
 
     /// Create a single TestAppContext, for non-multi-client tests
     pub fn single() -> Self {
-        let dispatcher = TestDispatcher::new(0);
+        let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(0));
         Self::build(dispatcher, None)
     }
 
@@ -654,9 +656,11 @@ impl<V> Entity<V> {
                         }
                     }
 
+                    cx.borrow().background_executor().start_waiting();
                     rx.recv()
                         .await
                         .expect("view dropped with pending condition");
+                    cx.borrow().background_executor().finish_waiting();
                 }
             })
             .await

crates/gpui/src/app/visual_test_context.rs

@@ -57,9 +57,12 @@ impl VisualTestAppContext {
             .and_then(|s| s.parse().ok())
             .unwrap_or(0);
 
+        // Create liveness for task cancellation
+        let liveness = Arc::new(());
+
         // Create a visual test platform that combines real Mac rendering
         // with controllable TestDispatcher for deterministic task scheduling
-        let platform = Rc::new(VisualTestPlatform::new(seed));
+        let platform = Rc::new(VisualTestPlatform::new(seed, Arc::downgrade(&liveness)));
 
         // Get the dispatcher and executors from the platform
         let dispatcher = platform.dispatcher().clone();
@@ -70,7 +73,7 @@ impl VisualTestAppContext {
 
         let http_client = http_client::FakeHttpClient::with_404_response();
 
-        let mut app = App::new_app(platform.clone(), asset_source, http_client);
+        let mut app = App::new_app(platform.clone(), liveness, asset_source, http_client);
         app.borrow_mut().mode = GpuiMode::test();
 
         Self {

crates/gpui/src/element.rs

@@ -32,9 +32,9 @@
 //! your own custom layout algorithm or rendering a code editor.
 
 use crate::{
-    App, ArenaBox, AvailableSpace, Bounds, Context, DispatchNodeId, ElementId, FocusHandle,
-    InspectorElementId, LayoutId, Pixels, Point, Size, Style, Window, util::FluentBuilder,
-    window::with_element_arena,
+    App, ArenaBox, AvailableSpace, Bounds, Context, DispatchNodeId, ELEMENT_ARENA, ElementId,
+    FocusHandle, InspectorElementId, LayoutId, Pixels, Point, Size, Style, Window,
+    util::FluentBuilder,
 };
 use derive_more::{Deref, DerefMut};
 use std::{
@@ -579,7 +579,8 @@ impl AnyElement {
         E: 'static + Element,
         E::RequestLayoutState: Any,
     {
-        let element = with_element_arena(|arena| arena.alloc(|| Drawable::new(element)))
+        let element = ELEMENT_ARENA
+            .with_borrow_mut(|arena| arena.alloc(|| Drawable::new(element)))
             .map(|element| element as &mut dyn ElementObject);
         AnyElement(element)
     }

crates/gpui/src/executor.rs

@@ -1,38 +1,99 @@
-use crate::{App, PlatformDispatcher, PlatformScheduler};
+use crate::{App, PlatformDispatcher, RunnableMeta, RunnableVariant, TaskTiming, profiler};
+use async_task::Runnable;
 use futures::channel::mpsc;
-use scheduler::Scheduler;
+use parking_lot::{Condvar, Mutex};
 use smol::prelude::*;
 use std::{
     fmt::Debug,
-    future::Future,
     marker::PhantomData,
-    mem,
+    mem::{self, ManuallyDrop},
+    num::NonZeroUsize,
+    panic::Location,
     pin::Pin,
     rc::Rc,
-    sync::Arc,
+    sync::{
+        Arc,
+        atomic::{AtomicUsize, Ordering},
+    },
+    task::{Context, Poll},
+    thread::{self, ThreadId},
     time::{Duration, Instant},
 };
-use util::TryFutureExt;
+use util::TryFutureExt as _;
+use waker_fn::waker_fn;
 
-pub use scheduler::{FallibleTask, Priority};
+#[cfg(any(test, feature = "test-support"))]
+use rand::rngs::StdRng;
 
 /// A pointer to the executor that is currently running,
 /// for spawning background tasks.
 #[derive(Clone)]
 pub struct BackgroundExecutor {
-    inner: scheduler::BackgroundExecutor,
-    dispatcher: Arc<dyn PlatformDispatcher>,
+    #[doc(hidden)]
+    pub dispatcher: Arc<dyn PlatformDispatcher>,
 }
 
 /// A pointer to the executor that is currently running,
 /// for spawning tasks on the main thread.
+///
+/// This is intentionally `!Send` via the `not_send` marker field. This is because
+/// `ForegroundExecutor::spawn` does not require `Send` but checks at runtime that the future is
+/// only polled from the same thread it was spawned from. These checks would fail when spawning
+/// foreground tasks from background threads.
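+///
+/// A minimal sketch of spawning main-thread work (the `cx.foreground_executor()`
+/// accessor and the closure body are assumptions for illustration):
+///
+/// ```ignore
+/// cx.foreground_executor()
+///     .spawn(async move {
+///         // runs on the main thread; the future does not need to be `Send`
+///     })
+///     .detach();
+/// ```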
 #[derive(Clone)]
 pub struct ForegroundExecutor {
-    inner: scheduler::ForegroundExecutor,
-    dispatcher: Arc<dyn PlatformDispatcher>,
+    #[doc(hidden)]
+    pub dispatcher: Arc<dyn PlatformDispatcher>,
+    liveness: std::sync::Weak<()>,
     not_send: PhantomData<Rc<()>>,
 }
 
+/// Realtime task priority
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
+#[repr(u8)]
+pub enum RealtimePriority {
+    /// Audio task
+    Audio,
+    /// Other realtime task
+    #[default]
+    Other,
+}
+
+/// Task priority
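+///
+/// A minimal illustrative sketch of choosing a priority when spawning background
+/// work (the `cx` handle and the closure body are assumptions):
+///
+/// ```ignore
+/// cx.background_executor()
+///     .spawn_with_priority(Priority::Low, async move {
+///         // bulk work that should not starve higher-priority tasks
+///     })
+///     .detach();
+/// ```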
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
+#[repr(u8)]
+pub enum Priority {
+    /// Realtime priority
+    ///
+    /// Spawning a task with this priority will spin it off on a separate thread dedicated just to that task.
+    Realtime(RealtimePriority),
+    /// High priority
+    ///
+    /// Only use for tasks that are critical to the user experience / responsiveness of the editor.
+    High,
+    /// Medium priority, probably suits most of your use cases.
+    #[default]
+    Medium,
+    /// Low priority
+    ///
+    /// Use this for background work that can come in large quantities,
+    /// so it does not starve the executor of resources for high priority tasks.
+    Low,
+}
+
+impl Priority {
+    #[allow(dead_code)]
+    pub(crate) const fn probability(&self) -> u32 {
+        match self {
+            // realtime priorities are not considered for probability scheduling
+            Priority::Realtime(_) => 0,
+            Priority::High => 60,
+            Priority::Medium => 30,
+            Priority::Low => 10,
+        }
+    }
+}
+
 /// Task is a primitive that allows work to happen in the background.
 ///
 /// It implements [`Future`] so you can `.await` on it.
@@ -41,57 +102,63 @@ pub struct ForegroundExecutor {
 /// the task to continue running, but with no way to return a value.
 #[must_use]
 #[derive(Debug)]
-pub struct Task<T>(scheduler::Task<T>);
+pub struct Task<T>(TaskState<T>);
+
+#[derive(Debug)]
+enum TaskState<T> {
+    /// A task that is ready to return a value
+    Ready(Option<T>),
+
+    /// A task that is currently running.
+    Spawned(async_task::Task<T, RunnableMeta>),
+}
 
 impl<T> Task<T> {
-    /// Creates a new task that will resolve with the value.
+    /// Creates a new task that will resolve with the value
     pub fn ready(val: T) -> Self {
-        Task(scheduler::Task::ready(val))
+        Task(TaskState::Ready(Some(val)))
     }
 
-    /// Returns true if the task has completed or was created with `Task::ready`.
-    pub fn is_ready(&self) -> bool {
-        self.0.is_ready()
-    }
-
-    /// Detaching a task runs it to completion in the background.
+    /// Detaching a task runs it to completion in the background
     pub fn detach(self) {
-        self.0.detach()
-    }
-
-    /// Wraps a scheduler::Task.
-    pub fn from_scheduler(task: scheduler::Task<T>) -> Self {
-        Task(task)
+        match self {
+            Task(TaskState::Ready(_)) => {}
+            Task(TaskState::Spawned(task)) => task.detach(),
+        }
     }
 
     /// Converts this task into a fallible task that returns `Option<T>`.
     ///
     /// Unlike the standard `Task<T>`, a [`FallibleTask`] will return `None`
-    /// if the task was cancelled.
+    /// if the app was dropped while the task is executing.
     ///
     /// # Example
     ///
     /// ```ignore
-    /// // Background task that gracefully handles cancellation:
+    /// // Background task that gracefully handles app shutdown:
     /// cx.background_spawn(async move {
     ///     let result = foreground_task.fallible().await;
     ///     if let Some(value) = result {
     ///         // Process the value
     ///     }
-    ///     // If None, task was cancelled - just exit gracefully
+    ///     // If None, app was shut down - just exit gracefully
     /// }).detach();
     /// ```
     pub fn fallible(self) -> FallibleTask<T> {
-        self.0.fallible()
+        FallibleTask(match self.0 {
+            TaskState::Ready(val) => FallibleTaskState::Ready(val),
+            TaskState::Spawned(task) => FallibleTaskState::Spawned(task.fallible()),
+        })
     }
 }
 
-impl<T, E> Task<Result<T, E>>
+impl<E, T> Task<Result<T, E>>
 where
     T: 'static,
     E: 'static + Debug,
 {
-    /// Run the task to completion in the background and log any errors that occur.
+    /// Run the task to completion in the background and log any
+    /// errors that occur.
     #[track_caller]
     pub fn detach_and_log_err(self, cx: &App) {
         let location = core::panic::Location::caller();
@@ -101,42 +168,102 @@ where
     }
 }
 
-impl<T> std::future::Future for Task<T> {
+impl<T> Future for Task<T> {
     type Output = T;
 
-    fn poll(
-        self: std::pin::Pin<&mut Self>,
-        cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Self::Output> {
-        // SAFETY: Task is a repr(transparent) wrapper around scheduler::Task,
-        // and we're just projecting the pin through to the inner task.
-        let inner = unsafe { self.map_unchecked_mut(|t| &mut t.0) };
-        inner.poll(cx)
+    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+        match unsafe { self.get_unchecked_mut() } {
+            Task(TaskState::Ready(val)) => Poll::Ready(val.take().unwrap()),
+            Task(TaskState::Spawned(task)) => task.poll(cx),
+        }
     }
 }
 
-impl BackgroundExecutor {
-    /// Creates a new BackgroundExecutor from the given PlatformDispatcher.
-    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
-        #[cfg(any(test, feature = "test-support"))]
-        let scheduler: Arc<dyn Scheduler> = if let Some(test_dispatcher) = dispatcher.as_test() {
-            test_dispatcher.scheduler().clone()
-        } else {
-            Arc::new(PlatformScheduler::new(dispatcher.clone()))
-        };
+/// A task that returns `Option<T>` instead of panicking when cancelled.
+#[must_use]
+pub struct FallibleTask<T>(FallibleTaskState<T>);
 
-        #[cfg(not(any(test, feature = "test-support")))]
-        let scheduler: Arc<dyn Scheduler> = Arc::new(PlatformScheduler::new(dispatcher.clone()));
+enum FallibleTaskState<T> {
+    /// A task that is ready to return a value
+    Ready(Option<T>),
 
-        Self {
-            inner: scheduler::BackgroundExecutor::new(scheduler),
-            dispatcher,
+    /// A task that is currently running (wraps async_task::FallibleTask).
+    Spawned(async_task::FallibleTask<T, RunnableMeta>),
+}
+
+impl<T> FallibleTask<T> {
+    /// Creates a new fallible task that will resolve with the value.
+    pub fn ready(val: T) -> Self {
+        FallibleTask(FallibleTaskState::Ready(Some(val)))
+    }
+
+    /// Detaching a task runs it to completion in the background.
+    pub fn detach(self) {
+        match self.0 {
+            FallibleTaskState::Ready(_) => {}
+            FallibleTaskState::Spawned(task) => task.detach(),
+        }
+    }
+}
+
+impl<T> Future for FallibleTask<T> {
+    type Output = Option<T>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+        match unsafe { self.get_unchecked_mut() } {
+            FallibleTask(FallibleTaskState::Ready(val)) => Poll::Ready(val.take()),
+            FallibleTask(FallibleTaskState::Spawned(task)) => Pin::new(task).poll(cx),
+        }
+    }
+}
+
+impl<T> std::fmt::Debug for FallibleTask<T> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match &self.0 {
+            FallibleTaskState::Ready(_) => f.debug_tuple("FallibleTask::Ready").finish(),
+            FallibleTaskState::Spawned(task) => {
+                f.debug_tuple("FallibleTask::Spawned").field(task).finish()
+            }
         }
     }
+}
+
+/// A task label is an opaque identifier that you can use to
+/// refer to a task in tests.
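+///
+/// A small sketch of the usual pattern (the static and the test call are
+/// illustrative assumptions; `LOAD_INDEX_TEXT_TASK` in `fake_git_repo.rs` is a
+/// real instance of the same idea):
+///
+/// ```ignore
+/// static MY_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
+///
+/// // Production code tags the spawned work with the label:
+/// executor.spawn_labeled(*MY_TASK, async move { /* ... */ }).detach();
+///
+/// // A test can then force the labeled work to run after everything else:
+/// cx.executor().deprioritize(*MY_TASK);
+/// ```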
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub struct TaskLabel(NonZeroUsize);
+
+impl Default for TaskLabel {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl TaskLabel {
+    /// Construct a new task label.
+    pub fn new() -> Self {
+        static NEXT_TASK_LABEL: AtomicUsize = AtomicUsize::new(1);
+        Self(
+            NEXT_TASK_LABEL
+                .fetch_add(1, Ordering::SeqCst)
+                .try_into()
+                .unwrap(),
+        )
+    }
+}
+
+type AnyLocalFuture<R> = Pin<Box<dyn 'static + Future<Output = R>>>;
 
-    /// Close this executor. Tasks will not run after this is called.
-    pub fn close(&self) {
-        self.inner.close();
+type AnyFuture<R> = Pin<Box<dyn 'static + Send + Future<Output = R>>>;
+
+/// BackgroundExecutor lets you run things on background threads.
+/// In production this is a thread pool with no ordering guarantees.
+/// In tests this is simulated by running tasks one by one in a deterministic
+/// (but arbitrary) order controlled by the `SEED` environment variable.
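+///
+/// In test code this is usually reached through `TestAppContext::executor()`
+/// (a sketch; the spawned future and assertion are assumptions):
+///
+/// ```ignore
+/// let task = cx.executor().spawn(async move { 2 + 2 });
+/// cx.executor().run_until_parked();
+/// assert_eq!(task.await, 4);
+/// ```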
+impl BackgroundExecutor {
+    #[doc(hidden)]
+    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
+        Self { dispatcher }
     }
 
     /// Enqueues the given future to be run to completion on a background thread.
@@ -148,10 +275,7 @@ impl BackgroundExecutor {
         self.spawn_with_priority(Priority::default(), future)
     }
 
-    /// Enqueues the given future to be run to completion on a background thread with the given priority.
-    ///
-    /// When `Priority::RealtimeAudio` is used, the task runs on a dedicated thread with
-    /// realtime scheduling priority, suitable for audio processing.
+    /// Enqueues the given future to be run to completion on a background thread.
     #[track_caller]
     pub fn spawn_with_priority<R>(
         &self,
@@ -161,11 +285,7 @@ impl BackgroundExecutor {
     where
         R: Send + 'static,
     {
-        if priority == Priority::RealtimeAudio {
-            Task::from_scheduler(self.inner.spawn_realtime(future))
-        } else {
-            Task::from_scheduler(self.inner.spawn_with_priority(priority, future))
-        }
+        self.spawn_internal::<R>(Box::pin(future), None, priority)
     }
 
     /// Enqueues the given future to be run to completion on a background thread and blocking the current task on it.
@@ -176,10 +296,8 @@ impl BackgroundExecutor {
     where
         R: Send,
     {
-        use crate::RunnableMeta;
-        use parking_lot::{Condvar, Mutex};
-        use std::sync::{Arc, atomic::AtomicBool};
-
+        // We need to ensure that cancellation of the parent task does not drop the environment
+        // before our own task has completed or been cancelled.
         struct NotifyOnDrop<'a>(&'a (Condvar, Mutex<bool>));
 
         impl Drop for NotifyOnDrop<'_> {
@@ -202,21 +320,27 @@ impl BackgroundExecutor {
 
         let dispatcher = self.dispatcher.clone();
         let location = core::panic::Location::caller();
-        let closed = Arc::new(AtomicBool::new(false));
 
         let pair = &(Condvar::new(), Mutex::new(false));
         let _wait_guard = WaitOnDrop(pair);
 
         let (runnable, task) = unsafe {
             async_task::Builder::new()
-                .metadata(RunnableMeta { location, closed })
+                .metadata(RunnableMeta {
+                    location,
+                    app: None,
+                })
                 .spawn_unchecked(
                     move |_| async {
                         let _notify_guard = NotifyOnDrop(pair);
                         future.await
                     },
                     move |runnable| {
-                        dispatcher.dispatch(runnable, Priority::default());
+                        dispatcher.dispatch(
+                            RunnableVariant::Meta(runnable),
+                            None,
+                            Priority::default(),
+                        )
                     },
                 )
         };
@@ -224,6 +348,279 @@ impl BackgroundExecutor {
         task.await
     }
 
+    /// Enqueues the given future to be run to completion on a background thread.
+    /// The given label can be used to control the priority of the task in tests.
+    #[track_caller]
+    pub fn spawn_labeled<R>(
+        &self,
+        label: TaskLabel,
+        future: impl Future<Output = R> + Send + 'static,
+    ) -> Task<R>
+    where
+        R: Send + 'static,
+    {
+        self.spawn_internal::<R>(Box::pin(future), Some(label), Priority::default())
+    }
+
+    #[track_caller]
+    fn spawn_internal<R: Send + 'static>(
+        &self,
+        future: AnyFuture<R>,
+        label: Option<TaskLabel>,
+        priority: Priority,
+    ) -> Task<R> {
+        let dispatcher = self.dispatcher.clone();
+        let (runnable, task) = if let Priority::Realtime(realtime) = priority {
+            let location = core::panic::Location::caller();
+            let (mut tx, rx) = flume::bounded::<Runnable<RunnableMeta>>(1);
+
+            dispatcher.spawn_realtime(
+                realtime,
+                Box::new(move || {
+                    while let Ok(runnable) = rx.recv() {
+                        let start = Instant::now();
+                        let location = runnable.metadata().location;
+                        let mut timing = TaskTiming {
+                            location,
+                            start,
+                            end: None,
+                        };
+                        profiler::add_task_timing(timing);
+
+                        runnable.run();
+
+                        let end = Instant::now();
+                        timing.end = Some(end);
+                        profiler::add_task_timing(timing);
+                    }
+                }),
+            );
+
+            async_task::Builder::new()
+                .metadata(RunnableMeta {
+                    location,
+                    app: None,
+                })
+                .spawn(
+                    move |_| future,
+                    move |runnable| {
+                        let _ = tx.send(runnable);
+                    },
+                )
+        } else {
+            let location = core::panic::Location::caller();
+            async_task::Builder::new()
+                .metadata(RunnableMeta {
+                    location,
+                    app: None,
+                })
+                .spawn(
+                    move |_| future,
+                    move |runnable| {
+                        dispatcher.dispatch(RunnableVariant::Meta(runnable), label, priority)
+                    },
+                )
+        };
+
+        runnable.schedule();
+        Task(TaskState::Spawned(task))
+    }
+
+    /// Used by the test harness to run an async test in a synchronous fashion.
+    #[cfg(any(test, feature = "test-support"))]
+    #[track_caller]
+    pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
+        if let Ok(value) = self.block_internal(false, future, None) {
+            value
+        } else {
+            unreachable!()
+        }
+    }
+
+    /// Block the current thread until the given future resolves.
+    /// Consider using `block_with_timeout` instead.
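+    ///
+    /// A one-line sketch (the `load_manifest` future is an assumption):
+    ///
+    /// ```ignore
+    /// let manifest = cx.background_executor().block(load_manifest());
+    /// ```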
+    pub fn block<R>(&self, future: impl Future<Output = R>) -> R {
+        if let Ok(value) = self.block_internal(true, future, None) {
+            value
+        } else {
+            unreachable!()
+        }
+    }
+
+    #[cfg(not(any(test, feature = "test-support")))]
+    pub(crate) fn block_internal<Fut: Future>(
+        &self,
+        _background_only: bool,
+        future: Fut,
+        timeout: Option<Duration>,
+    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
+        use std::time::Instant;
+
+        let mut future = Box::pin(future);
+        if timeout == Some(Duration::ZERO) {
+            return Err(future);
+        }
+        let deadline = timeout.map(|timeout| Instant::now() + timeout);
+
+        let parker = parking::Parker::new();
+        let unparker = parker.unparker();
+        let waker = waker_fn(move || {
+            unparker.unpark();
+        });
+        let mut cx = std::task::Context::from_waker(&waker);
+
+        loop {
+            match future.as_mut().poll(&mut cx) {
+                Poll::Ready(result) => return Ok(result),
+                Poll::Pending => {
+                    let timeout =
+                        deadline.map(|deadline| deadline.saturating_duration_since(Instant::now()));
+                    if let Some(timeout) = timeout {
+                        if !parker.park_timeout(timeout)
+                            && deadline.is_some_and(|deadline| deadline < Instant::now())
+                        {
+                            return Err(future);
+                        }
+                    } else {
+                        parker.park();
+                    }
+                }
+            }
+        }
+    }
+
+    #[cfg(any(test, feature = "test-support"))]
+    #[track_caller]
+    pub(crate) fn block_internal<Fut: Future>(
+        &self,
+        background_only: bool,
+        future: Fut,
+        timeout: Option<Duration>,
+    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
+        use std::sync::atomic::AtomicBool;
+        use std::time::Instant;
+
+        use parking::Parker;
+
+        let mut future = Box::pin(future);
+        if timeout == Some(Duration::ZERO) {
+            return Err(future);
+        }
+
+        // When using a real platform (e.g., MacPlatform for visual tests that need actual
+        // Metal rendering), there's no test dispatcher. In this case, we block the thread
+        // directly by polling the future and parking until woken. This is required for
+        // VisualTestAppContext which uses real platform rendering but still needs blocking
+        // behavior for code paths like editor initialization that call block_with_timeout.
+        let Some(dispatcher) = self.dispatcher.as_test() else {
+            let deadline = timeout.map(|timeout| Instant::now() + timeout);
+
+            let parker = Parker::new();
+            let unparker = parker.unparker();
+            let waker = waker_fn(move || {
+                unparker.unpark();
+            });
+            let mut cx = std::task::Context::from_waker(&waker);
+
+            loop {
+                match future.as_mut().poll(&mut cx) {
+                    Poll::Ready(result) => return Ok(result),
+                    Poll::Pending => {
+                        let timeout = deadline
+                            .map(|deadline| deadline.saturating_duration_since(Instant::now()));
+                        if let Some(timeout) = timeout {
+                            if !parker.park_timeout(timeout)
+                                && deadline.is_some_and(|deadline| deadline < Instant::now())
+                            {
+                                return Err(future);
+                            }
+                        } else {
+                            parker.park();
+                        }
+                    }
+                }
+            }
+        };
+
+        let mut max_ticks = if timeout.is_some() {
+            dispatcher.gen_block_on_ticks()
+        } else {
+            usize::MAX
+        };
+
+        let parker = Parker::new();
+        let unparker = parker.unparker();
+
+        let awoken = Arc::new(AtomicBool::new(false));
+        let waker = waker_fn({
+            let awoken = awoken.clone();
+            let unparker = unparker.clone();
+            move || {
+                awoken.store(true, Ordering::SeqCst);
+                unparker.unpark();
+            }
+        });
+        let mut cx = std::task::Context::from_waker(&waker);
+
+        let duration = Duration::from_secs(
+            option_env!("GPUI_TEST_TIMEOUT")
+                .and_then(|s| s.parse::<u64>().ok())
+                .unwrap_or(180),
+        );
+        let mut test_should_end_by = Instant::now() + duration;
+
+        loop {
+            match future.as_mut().poll(&mut cx) {
+                Poll::Ready(result) => return Ok(result),
+                Poll::Pending => {
+                    if max_ticks == 0 {
+                        return Err(future);
+                    }
+                    max_ticks -= 1;
+
+                    if !dispatcher.tick(background_only) {
+                        if awoken.swap(false, Ordering::SeqCst) {
+                            continue;
+                        }
+
+                        if !dispatcher.parking_allowed() {
+                            if dispatcher.advance_clock_to_next_delayed() {
+                                continue;
+                            }
+                            let mut backtrace_message = String::new();
+                            let mut waiting_message = String::new();
+                            if let Some(backtrace) = dispatcher.waiting_backtrace() {
+                                backtrace_message =
+                                    format!("\nbacktrace of waiting future:\n{:?}", backtrace);
+                            }
+                            if let Some(waiting_hint) = dispatcher.waiting_hint() {
+                                waiting_message = format!("\n  waiting on: {}\n", waiting_hint);
+                            }
+                            panic!(
+                                "parked with nothing left to run{waiting_message}{backtrace_message}",
+                            )
+                        }
+                        dispatcher.push_unparker(unparker.clone());
+                        parker.park_timeout(Duration::from_millis(1));
+                        if Instant::now() > test_should_end_by {
+                            panic!("test timed out after {duration:?} with allow_parking")
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /// Block the current thread until the given future resolves
+    /// or `duration` has elapsed.
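+    ///
+    /// A brief sketch of handling the timeout case (the `load_data` future and
+    /// bindings are assumptions):
+    ///
+    /// ```ignore
+    /// match executor.block_with_timeout(Duration::from_secs(1), load_data()) {
+    ///     Ok(output) => { /* resolved in time */ }
+    ///     Err(pending) => { /* timed out; `pending` is the still-unfinished future */ }
+    /// }
+    /// ```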
+    pub fn block_with_timeout<Fut: Future>(
+        &self,
+        duration: Duration,
+        future: Fut,
+    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
+        self.block_internal(true, future, Some(duration))
+    }
+
     /// Scoped lets you start a number of tasks and waits
     /// for all of them to complete before returning.
     pub async fn scoped<'scope, F>(&self, scheduler: F)
@@ -263,7 +660,7 @@ impl BackgroundExecutor {
     /// Calling this instead of `std::time::Instant::now` allows the use
     /// of fake timers in tests.
     pub fn now(&self) -> Instant {
-        self.inner.scheduler().clock().now()
+        self.dispatcher.now()
     }
 
     /// Returns a task that will complete after the given duration.
@@ -273,86 +670,96 @@ impl BackgroundExecutor {
         if duration.is_zero() {
             return Task::ready(());
         }
-        self.spawn(self.inner.scheduler().timer(duration))
+        let location = core::panic::Location::caller();
+        let (runnable, task) = async_task::Builder::new()
+            .metadata(RunnableMeta {
+                location,
+                app: None,
+            })
+            .spawn(move |_| async move {}, {
+                let dispatcher = self.dispatcher.clone();
+                move |runnable| dispatcher.dispatch_after(duration, RunnableVariant::Meta(runnable))
+            });
+        runnable.schedule();
+        Task(TaskState::Spawned(task))
+    }
+
+    /// in tests, start_waiting lets you indicate which task is waiting (for debugging only)
+    #[cfg(any(test, feature = "test-support"))]
+    pub fn start_waiting(&self) {
+        self.dispatcher.as_test().unwrap().start_waiting();
     }
 
-    /// In tests, run an arbitrary number of tasks (determined by the SEED environment variable)
+    /// in tests, removes the debugging data added by start_waiting
+    #[cfg(any(test, feature = "test-support"))]
+    pub fn finish_waiting(&self) {
+        self.dispatcher.as_test().unwrap().finish_waiting();
+    }
+
+    /// in tests, run an arbitrary number of tasks (determined by the SEED environment variable)
     #[cfg(any(test, feature = "test-support"))]
     pub fn simulate_random_delay(&self) -> impl Future<Output = ()> + use<> {
         self.dispatcher.as_test().unwrap().simulate_random_delay()
     }
 
-    /// In tests, move time forward. This does not run any tasks, but does make `timer`s ready.
+    /// in tests, indicate that a given task from `spawn_labeled` should run after everything else
+    #[cfg(any(test, feature = "test-support"))]
+    pub fn deprioritize(&self, task_label: TaskLabel) {
+        self.dispatcher.as_test().unwrap().deprioritize(task_label)
+    }
+
+    /// in tests, move time forward. This does not run any tasks, but does make `timer`s ready.
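+    ///
+    /// A short sketch of the usual pairing with `timer` (the duration is arbitrary):
+    ///
+    /// ```ignore
+    /// let timeout = cx.executor().timer(Duration::from_secs(5));
+    /// cx.executor().advance_clock(Duration::from_secs(5)); // makes the timer ready
+    /// timeout.await;
+    /// ```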
     #[cfg(any(test, feature = "test-support"))]
     pub fn advance_clock(&self, duration: Duration) {
         self.dispatcher.as_test().unwrap().advance_clock(duration)
     }
 
-    /// In tests, run one task.
+    /// in tests, run one task.
     #[cfg(any(test, feature = "test-support"))]
     pub fn tick(&self) -> bool {
-        self.dispatcher.as_test().unwrap().scheduler().tick()
+        self.dispatcher.as_test().unwrap().tick(false)
     }
 
-    /// In tests, run tasks until the scheduler would park.
-    ///
-    /// Under the scheduler-backed test dispatcher, `tick()` will not advance the clock, so a pending
-    /// timer can keep `has_pending_tasks()` true even after all currently-runnable tasks have been
-    /// drained. To preserve the historical semantics that tests relied on (drain all work that can
-    /// make progress), we advance the clock to the next timer when no runnable tasks remain.
+    /// in tests, run all tasks that are ready to run. If after doing so
+    /// the test still has outstanding tasks, this will panic. (See also [`Self::allow_parking`])
     #[cfg(any(test, feature = "test-support"))]
     pub fn run_until_parked(&self) {
-        let scheduler = self.dispatcher.as_test().unwrap().scheduler();
-        scheduler.run();
+        self.dispatcher.as_test().unwrap().run_until_parked()
     }
 
-    /// In tests, prevents `run_until_parked` from panicking if there are outstanding tasks.
+    /// in tests, prevents `run_until_parked` from panicking if there are outstanding tasks.
+    /// This is useful when you are integrating other (non-GPUI) futures, like disk access, that
+    /// do take real async time to run.
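+    ///
+    /// A typical test-side sketch (the `fs` handle and `path` are assumptions):
+    ///
+    /// ```ignore
+    /// cx.executor().allow_parking();
+    /// let contents = fs.load(&path).await.unwrap(); // real disk I/O may park
+    /// cx.executor().run_until_parked();
+    /// ```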
     #[cfg(any(test, feature = "test-support"))]
     pub fn allow_parking(&self) {
-        self.dispatcher
-            .as_test()
-            .unwrap()
-            .scheduler()
-            .allow_parking();
-
-        if std::env::var("GPUI_RUN_UNTIL_PARKED_LOG").ok().as_deref() == Some("1") {
-            log::warn!("[gpui::executor] allow_parking: enabled");
-        }
+        self.dispatcher.as_test().unwrap().allow_parking();
     }
 
-    /// Sets the range of ticks to run before timing out in block_on.
+    /// undoes the effect of [`Self::allow_parking`].
     #[cfg(any(test, feature = "test-support"))]
-    pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
-        self.dispatcher
-            .as_test()
-            .unwrap()
-            .scheduler()
-            .set_timeout_ticks(range);
+    pub fn forbid_parking(&self) {
+        self.dispatcher.as_test().unwrap().forbid_parking();
     }
 
-    /// Undoes the effect of [`Self::allow_parking`].
+    /// adds detail to the "parked with nothing left to run" message.
     #[cfg(any(test, feature = "test-support"))]
-    pub fn forbid_parking(&self) {
-        self.dispatcher
-            .as_test()
-            .unwrap()
-            .scheduler()
-            .forbid_parking();
+    pub fn set_waiting_hint(&self, msg: Option<String>) {
+        self.dispatcher.as_test().unwrap().set_waiting_hint(msg);
     }
 
-    /// In tests, returns the rng used by the dispatcher.
+    /// in tests, returns the rng used by the dispatcher and seeded by the `SEED` environment variable
     #[cfg(any(test, feature = "test-support"))]
-    pub fn rng(&self) -> scheduler::SharedRng {
-        self.dispatcher.as_test().unwrap().scheduler().rng()
+    pub fn rng(&self) -> StdRng {
+        self.dispatcher.as_test().unwrap().rng()
     }
 
     /// How many CPUs are available to the dispatcher.
     pub fn num_cpus(&self) -> usize {
         #[cfg(any(test, feature = "test-support"))]
-        if self.dispatcher.as_test().is_some() {
-            return 4;
-        }
-        num_cpus::get()
+        return 4;
+
+        #[cfg(not(any(test, feature = "test-support")))]
+        return num_cpus::get();
     }
 
     /// Whether we're on the main thread.
@@ -360,112 +767,150 @@ impl BackgroundExecutor {
         self.dispatcher.is_main_thread()
     }
 
-    #[doc(hidden)]
-    pub fn dispatcher(&self) -> &Arc<dyn PlatformDispatcher> {
-        &self.dispatcher
+    #[cfg(any(test, feature = "test-support"))]
+    /// in tests, control the number of ticks that `block_with_timeout` will run before timing out.
+    pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
+        self.dispatcher.as_test().unwrap().set_block_on_ticks(range);
     }
 }
 
+/// ForegroundExecutor runs things on the main thread.
 impl ForegroundExecutor {
     /// Creates a new ForegroundExecutor from the given PlatformDispatcher.
-    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
-        #[cfg(any(test, feature = "test-support"))]
-        let (scheduler, session_id): (Arc<dyn Scheduler>, _) =
-            if let Some(test_dispatcher) = dispatcher.as_test() {
-                (
-                    test_dispatcher.scheduler().clone(),
-                    test_dispatcher.session_id(),
-                )
-            } else {
-                let platform_scheduler = Arc::new(PlatformScheduler::new(dispatcher.clone()));
-                let session_id = platform_scheduler.allocate_session_id();
-                (platform_scheduler, session_id)
-            };
-
-        #[cfg(not(any(test, feature = "test-support")))]
-        let (scheduler, session_id): (Arc<dyn Scheduler>, _) = {
-            let platform_scheduler = Arc::new(PlatformScheduler::new(dispatcher.clone()));
-            let session_id = platform_scheduler.allocate_session_id();
-            (platform_scheduler, session_id)
-        };
-
-        let inner = scheduler::ForegroundExecutor::new(session_id, scheduler);
-
+    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>, liveness: std::sync::Weak<()>) -> Self {
         Self {
-            inner,
             dispatcher,
+            liveness,
             not_send: PhantomData,
         }
     }
 
-    /// Close this executor. Tasks will not run after this is called.
-    pub fn close(&self) {
-        self.inner.close();
-    }
-
-    /// Enqueues the given Task to run on the main thread.
+    /// Enqueues the given Task to run on the main thread at some point in the future.
     #[track_caller]
     pub fn spawn<R>(&self, future: impl Future<Output = R> + 'static) -> Task<R>
     where
         R: 'static,
     {
-        Task::from_scheduler(self.inner.spawn(future))
+        self.inner_spawn(self.liveness.clone(), Priority::default(), future)
     }
 
-    /// Enqueues the given Task to run on the main thread with the given priority.
+    /// Enqueues the given Task to run on the main thread at some point in the future, with the given priority.
     #[track_caller]
     pub fn spawn_with_priority<R>(
         &self,
-        _priority: Priority,
+        priority: Priority,
         future: impl Future<Output = R> + 'static,
     ) -> Task<R>
     where
         R: 'static,
     {
-        // Priority is ignored for foreground tasks - they run in order on the main thread
-        Task::from_scheduler(self.inner.spawn(future))
+        self.inner_spawn(self.liveness.clone(), priority, future)
     }
 
-    /// Used by the test harness to run an async test in a synchronous fashion.
-    #[cfg(any(test, feature = "test-support"))]
     #[track_caller]
-    pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
-        use std::cell::Cell;
-
-        let scheduler = self.inner.scheduler();
+    pub(crate) fn inner_spawn<R>(
+        &self,
+        app: std::sync::Weak<()>,
+        priority: Priority,
+        future: impl Future<Output = R> + 'static,
+    ) -> Task<R>
+    where
+        R: 'static,
+    {
+        let dispatcher = self.dispatcher.clone();
+        let location = core::panic::Location::caller();
 
-        let output = Cell::new(None);
-        let future = async {
-            output.set(Some(future.await));
-        };
-        let mut future = std::pin::pin!(future);
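+        // Box the future and delegate to a helper that is generic only over the output
+        // type; the spawn location captured above is attached to the task's metadata.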
+        #[track_caller]
+        fn inner<R: 'static>(
+            dispatcher: Arc<dyn PlatformDispatcher>,
+            future: AnyLocalFuture<R>,
+            location: &'static core::panic::Location<'static>,
+            app: std::sync::Weak<()>,
+            priority: Priority,
+        ) -> Task<R> {
+            let (runnable, task) = spawn_local_with_source_location(
+                future,
+                move |runnable| {
+                    dispatcher.dispatch_on_main_thread(RunnableVariant::Meta(runnable), priority)
+                },
+                RunnableMeta {
+                    location,
+                    app: Some(app),
+                },
+            );
+            runnable.schedule();
+            Task(TaskState::Spawned(task))
+        }
+        inner::<R>(dispatcher, Box::pin(future), location, app, priority)
+    }
+}
 
-        // In async GPUI tests, we must allow foreground tasks scheduled by the test itself
-        // (which are associated with the test session) to make progress while we block.
-        // Otherwise, awaiting futures that depend on same-session foreground work can deadlock.
-        scheduler.block(None, future.as_mut(), None);
+/// Variant of `async_task::spawn_local` that includes the source location of the spawn in panics.
+///
+/// Copy-modified from:
+/// <https://github.com/smol-rs/async-task/blob/ca9dbe1db9c422fd765847fa91306e30a6bb58a9/src/runnable.rs#L405>
+#[track_caller]
+fn spawn_local_with_source_location<Fut, S, M>(
+    future: Fut,
+    schedule: S,
+    metadata: M,
+) -> (Runnable<M>, async_task::Task<Fut::Output, M>)
+where
+    Fut: Future + 'static,
+    Fut::Output: 'static,
+    S: async_task::Schedule<M> + Send + Sync + 'static,
+    M: 'static,
+{
+    #[inline]
+    fn thread_id() -> ThreadId {
+        std::thread_local! {
+            static ID: ThreadId = thread::current().id();
+        }
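+        // Fall back to `thread::current().id()` if the thread-local has already been destroyed.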
+        ID.try_with(|id| *id)
+            .unwrap_or_else(|_| thread::current().id())
+    }
 
-        output.take().expect("block_test future did not complete")
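+    // Wrapper future that panics, reporting the original spawn location, if it is
+    // polled or dropped on a thread other than the one that created it.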
+    struct Checked<F> {
+        id: ThreadId,
+        inner: ManuallyDrop<F>,
+        location: &'static Location<'static>,
     }
 
-    /// Block the current thread until the given future resolves.
-    /// Consider using `block_with_timeout` instead.
-    pub fn block_on<R>(&self, future: impl Future<Output = R>) -> R {
-        self.inner.block_on(future)
+    impl<F> Drop for Checked<F> {
+        fn drop(&mut self) {
+            assert!(
+                self.id == thread_id(),
+                "local task dropped by a thread that didn't spawn it. Task spawned at {}",
+                self.location
+            );
+            unsafe { ManuallyDrop::drop(&mut self.inner) };
+        }
     }
 
-    /// Block the current thread until the given future resolves or the timeout elapses.
-    pub fn block_with_timeout<R, Fut: Future<Output = R>>(
-        &self,
-        duration: Duration,
-        future: Fut,
-    ) -> Result<R, impl Future<Output = R> + use<R, Fut>> {
-        self.inner.block_with_timeout(duration, future)
+    impl<F: Future> Future for Checked<F> {
+        type Output = F::Output;
+
+        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+            assert!(
+                self.id == thread_id(),
+                "local task polled by a thread that didn't spawn it. Task spawned at {}",
+                self.location
+            );
+            unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) }
+        }
     }
 
-    #[doc(hidden)]
-    pub fn dispatcher(&self) -> &Arc<dyn PlatformDispatcher> {
-        &self.dispatcher
+    // Wrap the future into one that checks which thread it's on.
+    let future = Checked {
+        id: thread_id(),
+        inner: ManuallyDrop::new(future),
+        location: Location::caller(),
+    };
+
+    unsafe {
+        async_task::Builder::new()
+            .metadata(metadata)
+            .spawn_unchecked(move |_| future, schedule)
     }
 }
 

crates/gpui/src/gpui.rs 🔗

@@ -20,8 +20,6 @@ pub mod colors;
 mod element;
 mod elements;
 mod executor;
-mod platform_scheduler;
-pub(crate) use platform_scheduler::PlatformScheduler;
 mod geometry;
 mod global;
 mod input;

crates/gpui/src/platform.rs 🔗

@@ -42,9 +42,10 @@ use crate::{
     Action, AnyWindowHandle, App, AsyncWindowContext, BackgroundExecutor, Bounds,
     DEFAULT_WINDOW_SIZE, DevicePixels, DispatchEventResult, Font, FontId, FontMetrics, FontRun,
     ForegroundExecutor, GlyphId, GpuSpecs, ImageSource, Keymap, LineLayout, Pixels, PlatformInput,
-    Point, Priority, RenderGlyphParams, RenderImage, RenderImageParams, RenderSvgParams, Scene,
-    ShapedGlyph, ShapedRun, SharedString, Size, SvgRenderer, SystemWindowTab, Task, TaskTiming,
-    ThreadTaskTimings, Window, WindowControlArea, hash, point, px, size,
+    Point, Priority, RealtimePriority, RenderGlyphParams, RenderImage, RenderImageParams,
+    RenderSvgParams, Scene, ShapedGlyph, ShapedRun, SharedString, Size, SvgRenderer,
+    SystemWindowTab, Task, TaskLabel, TaskTiming, ThreadTaskTimings, Window, WindowControlArea,
+    hash, point, px, size,
 };
 use anyhow::Result;
 use async_task::Runnable;
@@ -54,7 +55,6 @@ use image::RgbaImage;
 use image::codecs::gif::GifDecoder;
 use image::{AnimationDecoder as _, Frame};
 use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
-pub use scheduler::RunnableMeta;
 use schemars::JsonSchema;
 use seahash::SeaHasher;
 use serde::{Deserialize, Serialize};
@@ -98,43 +98,45 @@ pub use visual_test::VisualTestPlatform;
 
 /// Returns a background executor for the current platform.
 pub fn background_executor() -> BackgroundExecutor {
-    current_platform(true).background_executor()
+    // For a standalone background executor, use a dead liveness handle since there's no App.
+    // Weak::new() creates a weak reference that always returns None on upgrade.
+    current_platform(true, std::sync::Weak::new()).background_executor()
 }
 
 #[cfg(target_os = "macos")]
-pub(crate) fn current_platform(headless: bool) -> Rc<dyn Platform> {
-    Rc::new(MacPlatform::new(headless))
+pub(crate) fn current_platform(headless: bool, liveness: std::sync::Weak<()>) -> Rc<dyn Platform> {
+    Rc::new(MacPlatform::new(headless, liveness))
 }
 
 #[cfg(any(target_os = "linux", target_os = "freebsd"))]
-pub(crate) fn current_platform(headless: bool) -> Rc<dyn Platform> {
+pub(crate) fn current_platform(headless: bool, liveness: std::sync::Weak<()>) -> Rc<dyn Platform> {
     #[cfg(feature = "x11")]
     use anyhow::Context as _;
 
     if headless {
-        return Rc::new(HeadlessClient::new());
+        return Rc::new(HeadlessClient::new(liveness));
     }
 
     match guess_compositor() {
         #[cfg(feature = "wayland")]
-        "Wayland" => Rc::new(WaylandClient::new()),
+        "Wayland" => Rc::new(WaylandClient::new(liveness)),
 
         #[cfg(feature = "x11")]
         "X11" => Rc::new(
-            X11Client::new()
+            X11Client::new(liveness)
                 .context("Failed to initialize X11 client.")
                 .unwrap(),
         ),
 
-        "Headless" => Rc::new(HeadlessClient::new()),
+        "Headless" => Rc::new(HeadlessClient::new(liveness)),
         _ => unreachable!(),
     }
 }
 
 #[cfg(target_os = "windows")]
-pub(crate) fn current_platform(_headless: bool) -> Rc<dyn Platform> {
+pub(crate) fn current_platform(_headless: bool, liveness: std::sync::Weak<()>) -> Rc<dyn Platform> {
     Rc::new(
-        WindowsPlatform::new()
+        WindowsPlatform::new(liveness)
             .inspect_err(|err| show_error("Failed to launch", err.to_string()))
             .unwrap(),
     )
@@ -590,10 +592,40 @@ pub(crate) trait PlatformWindow: HasWindowHandle + HasDisplayHandle {
     }
 }
 
-/// Type alias for runnables with metadata.
-/// Previously an enum with a single variant, now simplified to a direct type alias.
+/// This type is public so that our test macro can generate and use it, but it should not
+/// be considered part of our public API.
 #[doc(hidden)]
-pub type RunnableVariant = Runnable<RunnableMeta>;
+pub struct RunnableMeta {
+    /// Location of the runnable
+    pub location: &'static core::panic::Location<'static>,
+    /// Weak reference to check if the app is still alive before running this task
+    pub app: Option<std::sync::Weak<()>>,
+}
+
+impl std::fmt::Debug for RunnableMeta {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RunnableMeta")
+            .field("location", &self.location)
+            .field("app_alive", &self.is_app_alive())
+            .finish()
+    }
+}
+
+impl RunnableMeta {
+    /// Returns true if the app is still alive (or if no app tracking is configured).
+    pub fn is_app_alive(&self) -> bool {
+        match &self.app {
+            Some(weak) => weak.strong_count() > 0,
+            None => true,
+        }
+    }
+}
+
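+/// Either a runnable spawned by gpui that carries metadata (spawn location and app
+/// liveness), or a plain runnable with no metadata.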
+#[doc(hidden)]
+pub enum RunnableVariant {
+    Meta(Runnable<RunnableMeta>),
+    Compat(Runnable),
+}
 
 /// This type is public so that our test macro can generate and use it, but it should not
 /// be considered part of our public API.
@@ -602,10 +634,10 @@ pub trait PlatformDispatcher: Send + Sync {
     fn get_all_timings(&self) -> Vec<ThreadTaskTimings>;
     fn get_current_thread_timings(&self) -> Vec<TaskTiming>;
     fn is_main_thread(&self) -> bool;
-    fn dispatch(&self, runnable: RunnableVariant, priority: Priority);
+    fn dispatch(&self, runnable: RunnableVariant, label: Option<TaskLabel>, priority: Priority);
     fn dispatch_on_main_thread(&self, runnable: RunnableVariant, priority: Priority);
     fn dispatch_after(&self, duration: Duration, runnable: RunnableVariant);
-    fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>);
+    fn spawn_realtime(&self, priority: RealtimePriority, f: Box<dyn FnOnce() + Send>);
 
     fn now(&self) -> Instant {
         Instant::now()

crates/gpui/src/platform/linux/dispatcher.rs 🔗

@@ -13,7 +13,8 @@ use std::{
 
 use crate::{
     GLOBAL_THREAD_TIMINGS, PlatformDispatcher, Priority, PriorityQueueReceiver,
-    PriorityQueueSender, RunnableVariant, THREAD_TIMINGS, TaskTiming, ThreadTaskTimings, profiler,
+    PriorityQueueSender, RealtimePriority, RunnableVariant, THREAD_TIMINGS, TaskLabel, TaskTiming,
+    ThreadTaskTimings, profiler,
 };
 
 struct TimerAfter {
@@ -37,34 +38,47 @@ impl LinuxDispatcher {
         let thread_count =
             std::thread::available_parallelism().map_or(MIN_THREADS, |i| i.get().max(MIN_THREADS));
 
+        // These threads should really be lower priority than the foreground
+        // executor.
         let mut background_threads = (0..thread_count)
             .map(|i| {
-                let mut receiver: PriorityQueueReceiver<RunnableVariant> =
-                    background_receiver.clone();
+                let mut receiver = background_receiver.clone();
                 std::thread::Builder::new()
                     .name(format!("Worker-{i}"))
                     .spawn(move || {
                         for runnable in receiver.iter() {
-                            // Check if the executor that spawned this task was closed
-                            if runnable.metadata().is_closed() {
-                                continue;
-                            }
-
                             let start = Instant::now();
 
-                            let location = runnable.metadata().location;
-                            let mut timing = TaskTiming {
-                                location,
-                                start,
-                                end: None,
+                            let mut location = match runnable {
+                                RunnableVariant::Meta(runnable) => {
+                                    let location = runnable.metadata().location;
+                                    let timing = TaskTiming {
+                                        location,
+                                        start,
+                                        end: None,
+                                    };
+                                    profiler::add_task_timing(timing);
+
+                                    runnable.run();
+                                    timing
+                                }
+                                RunnableVariant::Compat(runnable) => {
+                                    let location = core::panic::Location::caller();
+                                    let timing = TaskTiming {
+                                        location,
+                                        start,
+                                        end: None,
+                                    };
+                                    profiler::add_task_timing(timing);
+
+                                    runnable.run();
+                                    timing
+                                }
                             };
-                            profiler::add_task_timing(timing);
-
-                            runnable.run();
 
                             let end = Instant::now();
-                            timing.end = Some(end);
-                            profiler::add_task_timing(timing);
+                            location.end = Some(end);
+                            profiler::add_task_timing(location);
 
                             log::trace!(
                                 "background thread {}: ran runnable. took: {:?}",
@@ -80,7 +94,7 @@ impl LinuxDispatcher {
         let (timer_sender, timer_channel) = calloop::channel::channel::<TimerAfter>();
         let timer_thread = std::thread::Builder::new()
             .name("Timer".to_owned())
-            .spawn(move || {
+            .spawn(|| {
                 let mut event_loop: EventLoop<()> =
                     EventLoop::try_new().expect("Failed to initialize timer loop!");
 
@@ -89,27 +103,39 @@ impl LinuxDispatcher {
                 handle
                     .insert_source(timer_channel, move |e, _, _| {
                         if let channel::Event::Msg(timer) = e {
+                            // This has to be in an option to satisfy the borrow checker. The callback below should only be scheduled once.
                             let mut runnable = Some(timer.runnable);
                             timer_handle
                                 .insert_source(
                                     calloop::timer::Timer::from_duration(timer.duration),
                                     move |_, _, _| {
                                         if let Some(runnable) = runnable.take() {
-                                            // Check if the executor that spawned this task was closed
-                                            if runnable.metadata().is_closed() {
-                                                return TimeoutAction::Drop;
-                                            }
-
                                             let start = Instant::now();
-                                            let location = runnable.metadata().location;
-                                            let mut timing = TaskTiming {
-                                                location,
-                                                start,
-                                                end: None,
+                                            let mut timing = match runnable {
+                                                RunnableVariant::Meta(runnable) => {
+                                                    let location = runnable.metadata().location;
+                                                    let timing = TaskTiming {
+                                                        location,
+                                                        start,
+                                                        end: None,
+                                                    };
+                                                    profiler::add_task_timing(timing);
+
+                                                    runnable.run();
+                                                    timing
+                                                }
+                                                RunnableVariant::Compat(runnable) => {
+                                                    let timing = TaskTiming {
+                                                        location: core::panic::Location::caller(),
+                                                        start,
+                                                        end: None,
+                                                    };
+                                                    profiler::add_task_timing(timing);
+
+                                                    runnable.run();
+                                                    timing
+                                                }
                                             };
-                                            profiler::add_task_timing(timing);
-
-                                            runnable.run();
                                             let end = Instant::now();
 
                                             timing.end = Some(end);
@@ -163,7 +189,7 @@ impl PlatformDispatcher for LinuxDispatcher {
         thread::current().id() == self.main_thread_id
     }
 
-    fn dispatch(&self, runnable: RunnableVariant, priority: Priority) {
+    fn dispatch(&self, runnable: RunnableVariant, _: Option<TaskLabel>, priority: Priority) {
         self.background_sender
             .send(priority, runnable)
             .unwrap_or_else(|_| panic!("blocking sender returned without value"));
@@ -191,13 +217,19 @@ impl PlatformDispatcher for LinuxDispatcher {
             .ok();
     }
 
-    fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
+    fn spawn_realtime(&self, priority: RealtimePriority, f: Box<dyn FnOnce() + Send>) {
         std::thread::spawn(move || {
             // SAFETY: always safe to call
             let thread_id = unsafe { libc::pthread_self() };
 
-            let policy = libc::SCHED_FIFO;
-            let sched_priority = 65;
+            let policy = match priority {
+                RealtimePriority::Audio => libc::SCHED_FIFO,
+                RealtimePriority::Other => libc::SCHED_RR,
+            };
+            let sched_priority = match priority {
+                RealtimePriority::Audio => 65,
+                RealtimePriority::Other => 45,
+            };
 
             // SAFETY: all sched_param members are valid when initialized to zero.
             let mut sched_param =
@@ -206,7 +238,7 @@ impl PlatformDispatcher for LinuxDispatcher {
             // SAFETY: sched_param is a valid initialized structure
             let result = unsafe { libc::pthread_setschedparam(thread_id, policy, &sched_param) };
             if result != 0 {
-                log::warn!("failed to set realtime thread priority");
+                log::warn!("failed to set realtime thread priority to {:?}", priority);
             }
 
             f();

crates/gpui/src/platform/linux/headless/client.rs 🔗

@@ -21,17 +21,20 @@ pub struct HeadlessClientState {
 pub(crate) struct HeadlessClient(Rc<RefCell<HeadlessClientState>>);
 
 impl HeadlessClient {
-    pub(crate) fn new() -> Self {
+    pub(crate) fn new(liveness: std::sync::Weak<()>) -> Self {
         let event_loop = EventLoop::try_new().unwrap();
 
-        let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal());
+        let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal(), liveness);
 
         let handle = event_loop.handle();
 
         handle
             .insert_source(main_receiver, |event, _, _: &mut HeadlessClient| {
                 if let calloop::channel::Event::Msg(runnable) = event {
-                    runnable.run();
+                    match runnable {
+                        crate::RunnableVariant::Meta(runnable) => runnable.run(),
+                        crate::RunnableVariant::Compat(runnable) => runnable.run(),
+                    };
                 }
             })
             .ok();

crates/gpui/src/platform/linux/platform.rs 🔗

@@ -149,7 +149,10 @@ pub(crate) struct LinuxCommon {
 }
 
 impl LinuxCommon {
-    pub fn new(signal: LoopSignal) -> (Self, PriorityQueueCalloopReceiver<RunnableVariant>) {
+    pub fn new(
+        signal: LoopSignal,
+        liveness: std::sync::Weak<()>,
+    ) -> (Self, PriorityQueueCalloopReceiver<RunnableVariant>) {
         let (main_sender, main_receiver) = PriorityQueueCalloopReceiver::new();
 
         #[cfg(any(feature = "wayland", feature = "x11"))]
@@ -165,7 +168,7 @@ impl LinuxCommon {
 
         let common = LinuxCommon {
             background_executor,
-            foreground_executor: ForegroundExecutor::new(dispatcher),
+            foreground_executor: ForegroundExecutor::new(dispatcher, liveness),
             text_system,
             appearance: WindowAppearance::Light,
             auto_hide_scrollbars: false,

crates/gpui/src/platform/linux/wayland/client.rs 🔗

@@ -81,6 +81,10 @@ use crate::{
     PlatformInput, PlatformKeyboardLayout, Point, ResultExt as _, SCROLL_LINES, ScrollDelta,
     ScrollWheelEvent, Size, TouchPhase, WindowParams, point, profiler, px, size,
 };
+use crate::{
+    RunnableVariant, TaskTiming,
+    platform::{PlatformWindow, blade::BladeContext},
+};
 use crate::{
     SharedString,
     platform::linux::{
@@ -95,10 +99,6 @@ use crate::{
         xdg_desktop_portal::{Event as XDPEvent, XDPEventSource},
     },
 };
-use crate::{
-    TaskTiming,
-    platform::{PlatformWindow, blade::BladeContext},
-};
 
 /// Used to convert evdev scancode to xkb scancode
 const MIN_KEYCODE: u32 = 8;
@@ -453,7 +453,7 @@ fn wl_output_version(version: u32) -> u32 {
 }
 
 impl WaylandClient {
-    pub(crate) fn new() -> Self {
+    pub(crate) fn new(liveness: std::sync::Weak<()>) -> Self {
         let conn = Connection::connect_to_env().unwrap();
 
         let (globals, mut event_queue) =
@@ -490,7 +490,7 @@ impl WaylandClient {
 
         let event_loop = EventLoop::<WaylandClientStatePtr>::try_new().unwrap();
 
-        let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal());
+        let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal(), liveness);
 
         let handle = event_loop.handle();
         handle
@@ -500,15 +500,32 @@ impl WaylandClient {
                     if let calloop::channel::Event::Msg(runnable) = event {
                         handle.insert_idle(|_| {
                             let start = Instant::now();
-                            let location = runnable.metadata().location;
-                            let mut timing = TaskTiming {
-                                location,
-                                start,
-                                end: None,
+                            let mut timing = match runnable {
+                                RunnableVariant::Meta(runnable) => {
+                                    let location = runnable.metadata().location;
+                                    let timing = TaskTiming {
+                                        location,
+                                        start,
+                                        end: None,
+                                    };
+                                    profiler::add_task_timing(timing);
+
+                                    runnable.run();
+                                    timing
+                                }
+                                RunnableVariant::Compat(runnable) => {
+                                    let location = core::panic::Location::caller();
+                                    let timing = TaskTiming {
+                                        location,
+                                        start,
+                                        end: None,
+                                    };
+                                    profiler::add_task_timing(timing);
+
+                                    runnable.run();
+                                    timing
+                                }
                             };
-                            profiler::add_task_timing(timing);
-
-                            runnable.run();
 
                             let end = Instant::now();
                             timing.end = Some(end);

crates/gpui/src/platform/linux/x11/client.rs 🔗

@@ -1,4 +1,4 @@
-use crate::{Capslock, ResultExt as _, TaskTiming, profiler, xcb_flush};
+use crate::{Capslock, ResultExt as _, RunnableVariant, TaskTiming, profiler, xcb_flush};
 use anyhow::{Context as _, anyhow};
 use ashpd::WindowIdentifier;
 use calloop::{
@@ -297,10 +297,10 @@ impl X11ClientStatePtr {
 pub(crate) struct X11Client(Rc<RefCell<X11ClientState>>);
 
 impl X11Client {
-    pub(crate) fn new() -> anyhow::Result<Self> {
+    pub(crate) fn new(liveness: std::sync::Weak<()>) -> anyhow::Result<Self> {
         let event_loop = EventLoop::try_new()?;
 
-        let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal());
+        let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal(), liveness);
 
         let handle = event_loop.handle();
 
@@ -314,15 +314,32 @@ impl X11Client {
                         // callbacks.
                         handle.insert_idle(|_| {
                             let start = Instant::now();
-                            let location = runnable.metadata().location;
-                            let mut timing = TaskTiming {
-                                location,
-                                start,
-                                end: None,
+                            let mut timing = match runnable {
+                                RunnableVariant::Meta(runnable) => {
+                                    let location = runnable.metadata().location;
+                                    let timing = TaskTiming {
+                                        location,
+                                        start,
+                                        end: None,
+                                    };
+                                    profiler::add_task_timing(timing);
+
+                                    runnable.run();
+                                    timing
+                                }
+                                RunnableVariant::Compat(runnable) => {
+                                    let location = core::panic::Location::caller();
+                                    let timing = TaskTiming {
+                                        location,
+                                        start,
+                                        end: None,
+                                    };
+                                    profiler::add_task_timing(timing);
+
+                                    runnable.run();
+                                    timing
+                                }
                             };
-                            profiler::add_task_timing(timing);
-
-                            runnable.run();
 
                             let end = Instant::now();
                             timing.end = Some(end);

crates/gpui/src/platform/mac/dispatcher.rs 🔗

@@ -3,9 +3,12 @@
 #![allow(non_snake_case)]
 
 use crate::{
-    GLOBAL_THREAD_TIMINGS, PlatformDispatcher, Priority, RunnableMeta, RunnableVariant,
-    THREAD_TIMINGS, TaskTiming, ThreadTaskTimings,
+    GLOBAL_THREAD_TIMINGS, PlatformDispatcher, Priority, RealtimePriority, RunnableMeta,
+    RunnableVariant, THREAD_TIMINGS, TaskLabel, TaskTiming, ThreadTaskTimings,
 };
+
+use anyhow::Context;
+use async_task::Runnable;
 use mach2::{
     kern_return::KERN_SUCCESS,
     mach_time::mach_timebase_info_data_t,
@@ -16,9 +19,6 @@ use mach2::{
         thread_precedence_policy_data_t, thread_time_constraint_policy_data_t,
     },
 };
-use util::ResultExt;
-
-use async_task::Runnable;
 use objc::{
     class, msg_send,
     runtime::{BOOL, YES},
@@ -26,9 +26,11 @@ use objc::{
 };
 use std::{
     ffi::c_void,
+    mem::MaybeUninit,
     ptr::{NonNull, addr_of},
     time::{Duration, Instant},
 };
+use util::ResultExt;
 
 /// All items in the generated file are marked as pub, so we're gonna wrap it in a separate mod to prevent
 /// these pub items from leaking into public API.
@@ -43,12 +45,6 @@ pub(crate) fn dispatch_get_main_queue() -> dispatch_queue_t {
 
 pub(crate) struct MacDispatcher;
 
-impl MacDispatcher {
-    pub fn new() -> Self {
-        Self
-    }
-}
-
 impl PlatformDispatcher for MacDispatcher {
     fn get_all_timings(&self) -> Vec<ThreadTaskTimings> {
         let global_timings = GLOBAL_THREAD_TIMINGS.lock();
@@ -73,13 +69,20 @@ impl PlatformDispatcher for MacDispatcher {
         is_main_thread == YES
     }
 
-    fn dispatch(&self, runnable: RunnableVariant, priority: Priority) {
-        let context = runnable.into_raw().as_ptr() as *mut c_void;
+    fn dispatch(&self, runnable: RunnableVariant, _: Option<TaskLabel>, priority: Priority) {
+        let (context, trampoline) = match runnable {
+            RunnableVariant::Meta(runnable) => (
+                runnable.into_raw().as_ptr() as *mut c_void,
+                Some(trampoline as unsafe extern "C" fn(*mut c_void)),
+            ),
+            RunnableVariant::Compat(runnable) => (
+                runnable.into_raw().as_ptr() as *mut c_void,
+                Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)),
+            ),
+        };
 
         let queue_priority = match priority {
-            Priority::RealtimeAudio => {
-                panic!("RealtimeAudio priority should use spawn_realtime, not dispatch")
-            }
+            Priority::Realtime(_) => unreachable!(),
             Priority::High => DISPATCH_QUEUE_PRIORITY_HIGH as isize,
             Priority::Medium => DISPATCH_QUEUE_PRIORITY_DEFAULT as isize,
             Priority::Low => DISPATCH_QUEUE_PRIORITY_LOW as isize,
@@ -89,45 +92,76 @@ impl PlatformDispatcher for MacDispatcher {
             dispatch_async_f(
                 dispatch_get_global_queue(queue_priority, 0),
                 context,
-                Some(trampoline as unsafe extern "C" fn(*mut c_void)),
+                trampoline,
             );
         }
     }
 
     fn dispatch_on_main_thread(&self, runnable: RunnableVariant, _priority: Priority) {
-        let context = runnable.into_raw().as_ptr() as *mut c_void;
-        unsafe {
-            dispatch_async_f(
-                dispatch_get_main_queue(),
-                context,
+        let (context, trampoline) = match runnable {
+            RunnableVariant::Meta(runnable) => (
+                runnable.into_raw().as_ptr() as *mut c_void,
                 Some(trampoline as unsafe extern "C" fn(*mut c_void)),
-            );
+            ),
+            RunnableVariant::Compat(runnable) => (
+                runnable.into_raw().as_ptr() as *mut c_void,
+                Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)),
+            ),
+        };
+        unsafe {
+            dispatch_async_f(dispatch_get_main_queue(), context, trampoline);
         }
     }
 
     fn dispatch_after(&self, duration: Duration, runnable: RunnableVariant) {
-        let context = runnable.into_raw().as_ptr() as *mut c_void;
+        let (context, trampoline) = match runnable {
+            RunnableVariant::Meta(runnable) => (
+                runnable.into_raw().as_ptr() as *mut c_void,
+                Some(trampoline as unsafe extern "C" fn(*mut c_void)),
+            ),
+            RunnableVariant::Compat(runnable) => (
+                runnable.into_raw().as_ptr() as *mut c_void,
+                Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)),
+            ),
+        };
         unsafe {
             let queue =
                 dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH.try_into().unwrap(), 0);
             let when = dispatch_time(DISPATCH_TIME_NOW as u64, duration.as_nanos() as i64);
-            dispatch_after_f(
-                when,
-                queue,
-                context,
-                Some(trampoline as unsafe extern "C" fn(*mut c_void)),
-            );
+            dispatch_after_f(when, queue, context, trampoline);
         }
     }
 
-    fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
+    fn spawn_realtime(&self, priority: RealtimePriority, f: Box<dyn FnOnce() + Send>) {
         std::thread::spawn(move || {
-            set_audio_thread_priority().log_err();
+            match priority {
+                RealtimePriority::Audio => set_audio_thread_priority(),
+                RealtimePriority::Other => set_high_thread_priority(),
+            }
+            .context(format!("for priority {:?}", priority))
+            .log_err();
+
             f();
         });
     }
 }
 
+fn set_high_thread_priority() -> anyhow::Result<()> {
+    // SAFETY: always safe to call
+    let thread_id = unsafe { libc::pthread_self() };
+
+    // SAFETY: all sched_param members are valid when initialized to zero.
+    let mut sched_param = unsafe { MaybeUninit::<libc::sched_param>::zeroed().assume_init() };
+    sched_param.sched_priority = 45;
+
+    let result = unsafe { libc::pthread_setschedparam(thread_id, libc::SCHED_FIFO, &sched_param) };
+    if result != 0 {
+        anyhow::bail!("failed to set realtime thread priority")
+    }
+
+    Ok(())
+}
+
 fn set_audio_thread_priority() -> anyhow::Result<()> {
     // https://chromium.googlesource.com/chromium/chromium/+/master/base/threading/platform_thread_mac.mm#93
 
@@ -213,19 +247,18 @@ fn set_audio_thread_priority() -> anyhow::Result<()> {
     Ok(())
 }
 
-extern "C" fn trampoline(context: *mut c_void) {
-    let runnable =
-        unsafe { Runnable::<RunnableMeta>::from_raw(NonNull::new_unchecked(context as *mut ())) };
+extern "C" fn trampoline(runnable: *mut c_void) {
+    let task =
+        unsafe { Runnable::<RunnableMeta>::from_raw(NonNull::new_unchecked(runnable as *mut ())) };
 
-    let metadata = runnable.metadata();
+    let metadata = task.metadata();
+    let location = metadata.location;
 
-    // Check if the executor that spawned this task was closed
-    if metadata.is_closed() {
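+    // If the App has already been dropped, discard the task without running it.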
+    if !metadata.is_app_alive() {
+        drop(task);
         return;
     }
 
-    let location = metadata.location;
-
     let start = Instant::now();
     let timing = TaskTiming {
         location,
@@ -245,7 +278,43 @@ extern "C" fn trampoline(context: *mut c_void) {
         timings.push_back(timing);
     });
 
-    runnable.run();
+    task.run();
+    let end = Instant::now();
+
+    THREAD_TIMINGS.with(|timings| {
+        let mut timings = timings.lock();
+        let timings = &mut timings.timings;
+        let Some(last_timing) = timings.iter_mut().rev().next() else {
+            return;
+        };
+        last_timing.end = Some(end);
+    });
+}
+
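+// Trampoline for `RunnableVariant::Compat`: runs a plain runnable with no metadata,
+// so the recorded timing location is this function rather than the spawn site.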
+extern "C" fn trampoline_compat(runnable: *mut c_void) {
+    let task = unsafe { Runnable::<()>::from_raw(NonNull::new_unchecked(runnable as *mut ())) };
+
+    let location = core::panic::Location::caller();
+
+    let start = Instant::now();
+    let timing = TaskTiming {
+        location,
+        start,
+        end: None,
+    };
+    THREAD_TIMINGS.with(|timings| {
+        let mut timings = timings.lock();
+        let timings = &mut timings.timings;
+        if let Some(last_timing) = timings.iter_mut().rev().next() {
+            if last_timing.location == timing.location {
+                return;
+            }
+        }
+
+        timings.push_back(timing);
+    });
+
+    task.run();
     let end = Instant::now();
 
     THREAD_TIMINGS.with(|timings| {

crates/gpui/src/platform/mac/platform.rs 🔗

@@ -174,8 +174,8 @@ pub(crate) struct MacPlatformState {
 }
 
 impl MacPlatform {
-    pub(crate) fn new(headless: bool) -> Self {
-        let dispatcher = Arc::new(MacDispatcher::new());
+    pub(crate) fn new(headless: bool, liveness: std::sync::Weak<()>) -> Self {
+        let dispatcher = Arc::new(MacDispatcher);
 
         #[cfg(feature = "font-kit")]
         let text_system = Arc::new(crate::MacTextSystem::new());
@@ -190,7 +190,7 @@ impl MacPlatform {
             headless,
             text_system,
             background_executor: BackgroundExecutor::new(dispatcher.clone()),
-            foreground_executor: ForegroundExecutor::new(dispatcher),
+            foreground_executor: ForegroundExecutor::new(dispatcher, liveness),
             renderer_context: renderer::Context::default(),
             general_pasteboard: Pasteboard::general(),
             find_pasteboard: Pasteboard::find(),

crates/gpui/src/platform/test/dispatcher.rs 🔗

@@ -1,78 +1,275 @@
-use crate::{PlatformDispatcher, Priority, RunnableVariant};
-use scheduler::{Clock, Scheduler, SessionId, TestScheduler, TestSchedulerConfig, Yield};
+use crate::{PlatformDispatcher, Priority, RunnableVariant, TaskLabel};
+use backtrace::Backtrace;
+use collections::{HashMap, HashSet, VecDeque};
+use parking::Unparker;
+use parking_lot::Mutex;
+use rand::prelude::*;
 use std::{
+    future::Future,
+    ops::RangeInclusive,
+    pin::Pin,
     sync::Arc,
+    task::{Context, Poll},
     time::{Duration, Instant},
 };
+use util::post_inc;
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+struct TestDispatcherId(usize);
 
-/// TestDispatcher provides deterministic async execution for tests.
-///
-/// This implementation delegates task scheduling to the scheduler crate's `TestScheduler`.
-/// Access the scheduler directly via `scheduler()` for clock, rng, and parking control.
 #[doc(hidden)]
 pub struct TestDispatcher {
-    session_id: SessionId,
-    scheduler: Arc<TestScheduler>,
+    id: TestDispatcherId,
+    state: Arc<Mutex<TestDispatcherState>>,
+}
+
+struct TestDispatcherState {
+    random: StdRng,
+    foreground: HashMap<TestDispatcherId, VecDeque<RunnableVariant>>,
+    background: Vec<RunnableVariant>,
+    deprioritized_background: Vec<RunnableVariant>,
+    delayed: Vec<(Duration, RunnableVariant)>,
+    start_time: Instant,
+    time: Duration,
+    is_main_thread: bool,
+    next_id: TestDispatcherId,
+    allow_parking: bool,
+    waiting_hint: Option<String>,
+    waiting_backtrace: Option<Backtrace>,
+    deprioritized_task_labels: HashSet<TaskLabel>,
+    block_on_ticks: RangeInclusive<usize>,
+    unparkers: Vec<Unparker>,
 }
 
 impl TestDispatcher {
-    pub fn new(seed: u64) -> Self {
-        let scheduler = Arc::new(TestScheduler::new(TestSchedulerConfig {
-            seed,
-            randomize_order: true,
+    pub fn new(random: StdRng) -> Self {
+        let state = TestDispatcherState {
+            random,
+            foreground: HashMap::default(),
+            background: Vec::new(),
+            deprioritized_background: Vec::new(),
+            delayed: Vec::new(),
+            time: Duration::ZERO,
+            start_time: Instant::now(),
+            is_main_thread: true,
+            next_id: TestDispatcherId(1),
             allow_parking: false,
-            capture_pending_traces: std::env::var("PENDING_TRACES")
-                .map_or(false, |var| var == "1" || var == "true"),
-            timeout_ticks: 0..=1000,
-        }));
-
-        let session_id = scheduler.allocate_session_id();
+            waiting_hint: None,
+            waiting_backtrace: None,
+            deprioritized_task_labels: Default::default(),
+            block_on_ticks: 0..=1000,
+            unparkers: Default::default(),
+        };
 
         TestDispatcher {
-            session_id,
-            scheduler,
+            id: TestDispatcherId(0),
+            state: Arc::new(Mutex::new(state)),
         }
     }
 
-    pub fn scheduler(&self) -> &Arc<TestScheduler> {
-        &self.scheduler
+    pub fn advance_clock(&self, by: Duration) {
+        let new_now = self.state.lock().time + by;
+        loop {
+            self.run_until_parked();
+            let state = self.state.lock();
+            let next_due_time = state.delayed.first().map(|(time, _)| *time);
+            drop(state);
+            if let Some(due_time) = next_due_time
+                && due_time <= new_now
+            {
+                self.state.lock().time = due_time;
+                continue;
+            }
+            break;
+        }
+        self.state.lock().time = new_now;
     }
 
-    pub fn session_id(&self) -> SessionId {
-        self.session_id
+    pub fn advance_clock_to_next_delayed(&self) -> bool {
+        let next_due_time = self.state.lock().delayed.first().map(|(time, _)| *time);
+        if let Some(next_due_time) = next_due_time {
+            self.state.lock().time = next_due_time;
+            return true;
+        }
+        false
     }
 
-    pub fn advance_clock(&self, by: Duration) {
-        self.scheduler.advance_clock(by);
-    }
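+    /// Returns a future that yields back to the scheduler a random number of times
+    /// (between 0 and 9) before completing.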
+    pub fn simulate_random_delay(&self) -> impl 'static + Send + Future<Output = ()> + use<> {
+        struct YieldNow {
+            pub(crate) count: usize,
+        }
 
-    pub fn advance_clock_to_next_timer(&self) -> bool {
-        self.scheduler.advance_clock_to_next_timer()
-    }
+        impl Future for YieldNow {
+            type Output = ();
 
-    pub fn simulate_random_delay(&self) -> Yield {
-        self.scheduler.yield_random()
+            fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+                if self.count > 0 {
+                    self.count -= 1;
+                    cx.waker().wake_by_ref();
+                    Poll::Pending
+                } else {
+                    Poll::Ready(())
+                }
+            }
+        }
+
+        YieldNow {
+            count: self.state.lock().random.random_range(0..10),
+        }
     }
 
     pub fn tick(&self, background_only: bool) -> bool {
-        if background_only {
-            self.scheduler.tick_background_only()
-        } else {
-            self.scheduler.tick()
+        let mut state = self.state.lock();
+
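+        // Move any delayed runnables whose deadline has passed into the background queue.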
+        while let Some((deadline, _)) = state.delayed.first() {
+            if *deadline > state.time {
+                break;
+            }
+            let (_, runnable) = state.delayed.remove(0);
+            state.background.push(runnable);
         }
+
+        let foreground_len: usize = if background_only {
+            0
+        } else {
+            state
+                .foreground
+                .values()
+                .map(|runnables| runnables.len())
+                .sum()
+        };
+        let background_len = state.background.len();
+
+        let runnable;
+        let main_thread;
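+        // Pick foreground vs. background work with probability proportional to the queue
+        // lengths, falling back to deprioritized tasks only when both queues are empty.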
+        if foreground_len == 0 && background_len == 0 {
+            let deprioritized_background_len = state.deprioritized_background.len();
+            if deprioritized_background_len == 0 {
+                return false;
+            }
+            let ix = state.random.random_range(0..deprioritized_background_len);
+            main_thread = false;
+            runnable = state.deprioritized_background.swap_remove(ix);
+        } else {
+            main_thread = state.random.random_ratio(
+                foreground_len as u32,
+                (foreground_len + background_len) as u32,
+            );
+            if main_thread {
+                let state = &mut *state;
+                runnable = state
+                    .foreground
+                    .values_mut()
+                    .filter(|runnables| !runnables.is_empty())
+                    .choose(&mut state.random)
+                    .unwrap()
+                    .pop_front()
+                    .unwrap();
+            } else {
+                let ix = state.random.random_range(0..background_len);
+                runnable = state.background.swap_remove(ix);
+            };
+        };
+
+        let was_main_thread = state.is_main_thread;
+        state.is_main_thread = main_thread;
+        drop(state);
+
+        // todo(localcc): add timings to tests
+        match runnable {
+            RunnableVariant::Meta(runnable) => {
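+                // Discard tasks whose App has already been dropped instead of running them.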
+                if !runnable.metadata().is_app_alive() {
+                    drop(runnable);
+                } else {
+                    runnable.run();
+                }
+            }
+            RunnableVariant::Compat(runnable) => {
+                runnable.run();
+            }
+        };
+
+        self.state.lock().is_main_thread = was_main_thread;
+
+        true
+    }
+
+    pub fn deprioritize(&self, task_label: TaskLabel) {
+        self.state
+            .lock()
+            .deprioritized_task_labels
+            .insert(task_label);
     }
 
     pub fn run_until_parked(&self) {
         while self.tick(false) {}
     }
+
+    pub fn parking_allowed(&self) -> bool {
+        self.state.lock().allow_parking
+    }
+
+    pub fn allow_parking(&self) {
+        self.state.lock().allow_parking = true
+    }
+
+    pub fn forbid_parking(&self) {
+        self.state.lock().allow_parking = false
+    }
+
+    pub fn set_waiting_hint(&self, msg: Option<String>) {
+        self.state.lock().waiting_hint = msg
+    }
+
+    pub fn waiting_hint(&self) -> Option<String> {
+        self.state.lock().waiting_hint.clone()
+    }
+
+    pub fn start_waiting(&self) {
+        self.state.lock().waiting_backtrace = Some(Backtrace::new_unresolved());
+    }
+
+    pub fn finish_waiting(&self) {
+        self.state.lock().waiting_backtrace.take();
+    }
+
+    pub fn waiting_backtrace(&self) -> Option<Backtrace> {
+        self.state.lock().waiting_backtrace.take().map(|mut b| {
+            b.resolve();
+            b
+        })
+    }
+
+    pub fn rng(&self) -> StdRng {
+        self.state.lock().random.clone()
+    }
+
+    pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
+        self.state.lock().block_on_ticks = range;
+    }
+
+    pub fn gen_block_on_ticks(&self) -> usize {
+        let mut lock = self.state.lock();
+        let block_on_ticks = lock.block_on_ticks.clone();
+        lock.random.random_range(block_on_ticks)
+    }
+
+    pub fn unpark_all(&self) {
+        self.state.lock().unparkers.retain(|parker| parker.unpark());
+    }
+
+    pub fn push_unparker(&self, unparker: Unparker) {
+        let mut state = self.state.lock();
+        state.unparkers.push(unparker);
+    }
 }
 
 impl Clone for TestDispatcher {
     fn clone(&self) -> Self {
-        let session_id = self.scheduler.allocate_session_id();
+        let id = post_inc(&mut self.state.lock().next_id.0);
         Self {
-            session_id,
-            scheduler: self.scheduler.clone(),
+            id: TestDispatcherId(id),
+            state: self.state.clone(),
         }
     }
 }
@@ -87,35 +284,50 @@ impl PlatformDispatcher for TestDispatcher {
     }
 
     fn is_main_thread(&self) -> bool {
-        self.scheduler.is_main_thread()
+        self.state.lock().is_main_thread
     }
 
     fn now(&self) -> Instant {
-        self.scheduler.clock().now()
+        let state = self.state.lock();
+        state.start_time + state.time
     }
 
-    fn dispatch(&self, runnable: RunnableVariant, priority: Priority) {
-        self.scheduler
-            .schedule_background_with_priority(runnable, priority);
+    fn dispatch(&self, runnable: RunnableVariant, label: Option<TaskLabel>, _priority: Priority) {
+        {
+            let mut state = self.state.lock();
+            if label.is_some_and(|label| state.deprioritized_task_labels.contains(&label)) {
+                state.deprioritized_background.push(runnable);
+            } else {
+                state.background.push(runnable);
+            }
+        }
+        self.unpark_all();
     }
 
     fn dispatch_on_main_thread(&self, runnable: RunnableVariant, _priority: Priority) {
-        self.scheduler
-            .schedule_foreground(self.session_id, runnable);
+        self.state
+            .lock()
+            .foreground
+            .entry(self.id)
+            .or_default()
+            .push_back(runnable);
+        self.unpark_all();
     }
 
-    fn dispatch_after(&self, _duration: Duration, _runnable: RunnableVariant) {
-        panic!(
-            "dispatch_after should not be called in tests. \
-            Use BackgroundExecutor::timer() which uses the scheduler's native timer."
-        );
+    fn dispatch_after(&self, duration: std::time::Duration, runnable: RunnableVariant) {
+        let mut state = self.state.lock();
+        let next_time = state.time + duration;
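+        // Keep `delayed` sorted by deadline so due entries can be popped from the front.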
+        let ix = match state.delayed.binary_search_by_key(&next_time, |e| e.0) {
+            Ok(ix) | Err(ix) => ix,
+        };
+        state.delayed.insert(ix, (next_time, runnable));
     }
 
     fn as_test(&self) -> Option<&TestDispatcher> {
         Some(self)
     }
 
-    fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
+    fn spawn_realtime(&self, _priority: crate::RealtimePriority, f: Box<dyn FnOnce() + Send>) {
         std::thread::spawn(move || {
             f();
         });

crates/gpui/src/platform/test/platform.rs 🔗

@@ -139,6 +139,7 @@ impl TestPlatform {
             .new_path
             .pop_front()
             .expect("no pending new path prompt");
+        self.background_executor().set_waiting_hint(None);
         tx.send(Ok(select_path(&path))).ok();
     }
 
@@ -150,6 +151,7 @@ impl TestPlatform {
             .multiple_choice
             .pop_front()
             .expect("no pending multiple choice prompt");
+        self.background_executor().set_waiting_hint(None);
         let Some(ix) = prompt.answers.iter().position(|a| a == response) else {
             panic!(
                 "PROMPT: {}\n{:?}\n{:?}\nCannot respond with {}",
@@ -184,6 +186,8 @@ impl TestPlatform {
     ) -> oneshot::Receiver<usize> {
         let (tx, rx) = oneshot::channel();
         let answers: Vec<String> = answers.iter().map(|s| s.label().to_string()).collect();
+        self.background_executor()
+            .set_waiting_hint(Some(format!("PROMPT: {:?} {:?}", msg, detail)));
         self.prompts
             .borrow_mut()
             .multiple_choice
@@ -348,6 +352,8 @@ impl Platform for TestPlatform {
         _suggested_name: Option<&str>,
     ) -> oneshot::Receiver<Result<Option<std::path::PathBuf>>> {
         let (tx, rx) = oneshot::channel();
+        self.background_executor()
+            .set_waiting_hint(Some(format!("PROMPT FOR PATH: {:?}", directory)));
         self.prompts
             .borrow_mut()
             .new_path

crates/gpui/src/platform/visual_test.rs 🔗

@@ -16,7 +16,7 @@ use crate::{
 use anyhow::Result;
 use futures::channel::oneshot;
 use parking_lot::Mutex;
-
+use rand::SeedableRng;
 use std::{
     path::{Path, PathBuf},
     rc::Rc,
@@ -39,17 +39,19 @@ pub struct VisualTestPlatform {
 }
 
 impl VisualTestPlatform {
-    /// Creates a new VisualTestPlatform with the given random seed.
+    /// Creates a new VisualTestPlatform with the given random seed and liveness tracker.
     ///
     /// The seed is used for deterministic random number generation in the TestDispatcher.
-    pub fn new(seed: u64) -> Self {
-        let dispatcher = TestDispatcher::new(seed);
+    /// The liveness weak reference is used to track when the app is being shut down.
+    pub fn new(seed: u64, liveness: std::sync::Weak<()>) -> Self {
+        let rng = rand::rngs::StdRng::seed_from_u64(seed);
+        let dispatcher = TestDispatcher::new(rng);
         let arc_dispatcher = Arc::new(dispatcher.clone());
 
         let background_executor = BackgroundExecutor::new(arc_dispatcher.clone());
-        let foreground_executor = ForegroundExecutor::new(arc_dispatcher);
+        let foreground_executor = ForegroundExecutor::new(arc_dispatcher, liveness.clone());
 
-        let mac_platform = MacPlatform::new(false);
+        let mac_platform = MacPlatform::new(false, liveness);
 
         Self {
             dispatcher,

crates/gpui/src/platform/windows/dispatcher.rs 🔗

@@ -14,7 +14,7 @@ use windows::{
         Foundation::{LPARAM, WPARAM},
         System::Threading::{
             GetCurrentThread, HIGH_PRIORITY_CLASS, SetPriorityClass, SetThreadPriority,
-            THREAD_PRIORITY_TIME_CRITICAL,
+            THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_TIME_CRITICAL,
         },
         UI::WindowsAndMessaging::PostMessageW,
     },
@@ -22,8 +22,8 @@ use windows::{
 
 use crate::{
     GLOBAL_THREAD_TIMINGS, HWND, PlatformDispatcher, Priority, PriorityQueueSender,
-    RunnableVariant, SafeHwnd, THREAD_TIMINGS, TaskTiming, ThreadTaskTimings,
-    WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, profiler,
+    RealtimePriority, RunnableVariant, SafeHwnd, THREAD_TIMINGS, TaskLabel, TaskTiming,
+    ThreadTaskTimings, WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, profiler,
 };
 
 pub(crate) struct WindowsDispatcher {
@@ -56,12 +56,7 @@ impl WindowsDispatcher {
         let handler = {
             let mut task_wrapper = Some(runnable);
             WorkItemHandler::new(move |_| {
-                let runnable = task_wrapper.take().unwrap();
-                // Check if the executor that spawned this task was closed
-                if runnable.metadata().is_closed() {
-                    return Ok(());
-                }
-                Self::execute_runnable(runnable);
+                Self::execute_runnable(task_wrapper.take().unwrap());
                 Ok(())
             })
         };
@@ -73,12 +68,7 @@ impl WindowsDispatcher {
         let handler = {
             let mut task_wrapper = Some(runnable);
             TimerElapsedHandler::new(move |_| {
-                let runnable = task_wrapper.take().unwrap();
-                // Check if the executor that spawned this task was closed
-                if runnable.metadata().is_closed() {
-                    return Ok(());
-                }
-                Self::execute_runnable(runnable);
+                Self::execute_runnable(task_wrapper.take().unwrap());
                 Ok(())
             })
         };
@@ -89,15 +79,33 @@ impl WindowsDispatcher {
     pub(crate) fn execute_runnable(runnable: RunnableVariant) {
         let start = Instant::now();
 
-        let location = runnable.metadata().location;
-        let mut timing = TaskTiming {
-            location,
-            start,
-            end: None,
-        };
-        profiler::add_task_timing(timing);
+        let mut timing = match runnable {
+            RunnableVariant::Meta(runnable) => {
+                let location = runnable.metadata().location;
+                let timing = TaskTiming {
+                    location,
+                    start,
+                    end: None,
+                };
+                profiler::add_task_timing(timing);
+
+                runnable.run();
+
+                timing
+            }
+            RunnableVariant::Compat(runnable) => {
+                let timing = TaskTiming {
+                    location: core::panic::Location::caller(),
+                    start,
+                    end: None,
+                };
+                profiler::add_task_timing(timing);
 
-        runnable.run();
+                runnable.run();
+
+                timing
+            }
+        };
 
         let end = Instant::now();
         timing.end = Some(end);
@@ -130,16 +138,18 @@ impl PlatformDispatcher for WindowsDispatcher {
         current().id() == self.main_thread_id
     }
 
-    fn dispatch(&self, runnable: RunnableVariant, priority: Priority) {
+    fn dispatch(&self, runnable: RunnableVariant, label: Option<TaskLabel>, priority: Priority) {
         let priority = match priority {
-            Priority::RealtimeAudio => {
-                panic!("RealtimeAudio priority should use spawn_realtime, not dispatch")
-            }
+            Priority::Realtime(_) => unreachable!(),
             Priority::High => WorkItemPriority::High,
             Priority::Medium => WorkItemPriority::Normal,
             Priority::Low => WorkItemPriority::Low,
         };
         self.dispatch_on_threadpool(priority, runnable);
+
+        if let Some(label) = label {
+            log::debug!("TaskLabel: {label:?}");
+        }
     }
 
     fn dispatch_on_main_thread(&self, runnable: RunnableVariant, priority: Priority) {
@@ -175,18 +185,23 @@ impl PlatformDispatcher for WindowsDispatcher {
         self.dispatch_on_threadpool_after(runnable, duration);
     }
 
-    fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
+    fn spawn_realtime(&self, priority: RealtimePriority, f: Box<dyn FnOnce() + Send>) {
         std::thread::spawn(move || {
             // SAFETY: always safe to call
             let thread_handle = unsafe { GetCurrentThread() };
 
+            let thread_priority = match priority {
+                RealtimePriority::Audio => THREAD_PRIORITY_TIME_CRITICAL,
+                RealtimePriority::Other => THREAD_PRIORITY_HIGHEST,
+            };
+
             // SAFETY: thread_handle is a valid handle to a thread
             unsafe { SetPriorityClass(thread_handle, HIGH_PRIORITY_CLASS) }
                 .context("thread priority class")
                 .log_err();
 
             // SAFETY: thread_handle is a valid handle to a thread
-            unsafe { SetThreadPriority(thread_handle, THREAD_PRIORITY_TIME_CRITICAL) }
+            unsafe { SetThreadPriority(thread_handle, thread_priority) }
                 .context("thread priority")
                 .log_err();
 

crates/gpui/src/platform/windows/platform.rs 🔗

@@ -93,7 +93,7 @@ impl WindowsPlatformState {
 }
 
 impl WindowsPlatform {
-    pub(crate) fn new() -> Result<Self> {
+    pub(crate) fn new(liveness: std::sync::Weak<()>) -> Result<Self> {
         unsafe {
             OleInitialize(None).context("unable to initialize Windows OLE")?;
         }
@@ -148,7 +148,7 @@ impl WindowsPlatform {
         let disable_direct_composition = std::env::var(DISABLE_DIRECT_COMPOSITION)
             .is_ok_and(|value| value == "true" || value == "1");
         let background_executor = BackgroundExecutor::new(dispatcher.clone());
-        let foreground_executor = ForegroundExecutor::new(dispatcher);
+        let foreground_executor = ForegroundExecutor::new(dispatcher, liveness);
 
         let drop_target_helper: IDropTargetHelper = unsafe {
             CoCreateInstance(&CLSID_DragDropHelper, None, CLSCTX_INPROC_SERVER)

crates/gpui/src/platform_scheduler.rs 🔗

@@ -1,138 +0,0 @@
-use crate::{PlatformDispatcher, RunnableMeta};
-use async_task::Runnable;
-use chrono::{DateTime, Utc};
-use futures::channel::oneshot;
-use scheduler::{Clock, Priority, Scheduler, SessionId, TestScheduler, Timer};
-use std::{
-    future::Future,
-    pin::Pin,
-    sync::{
-        Arc,
-        atomic::{AtomicU16, Ordering},
-    },
-    task::{Context, Poll},
-    time::{Duration, Instant},
-};
-use waker_fn::waker_fn;
-
-/// A production implementation of [`Scheduler`] that wraps a [`PlatformDispatcher`].
-///
-/// This allows GPUI to use the scheduler crate's executor types with the platform's
-/// native dispatch mechanisms (e.g., Grand Central Dispatch on macOS).
-pub struct PlatformScheduler {
-    dispatcher: Arc<dyn PlatformDispatcher>,
-    clock: Arc<PlatformClock>,
-    next_session_id: AtomicU16,
-}
-
-impl PlatformScheduler {
-    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
-        Self {
-            dispatcher: dispatcher.clone(),
-            clock: Arc::new(PlatformClock { dispatcher }),
-            next_session_id: AtomicU16::new(0),
-        }
-    }
-
-    pub fn allocate_session_id(&self) -> SessionId {
-        SessionId::new(self.next_session_id.fetch_add(1, Ordering::SeqCst))
-    }
-}
-
-impl Scheduler for PlatformScheduler {
-    fn block(
-        &self,
-        _session_id: Option<SessionId>,
-        mut future: Pin<&mut dyn Future<Output = ()>>,
-        timeout: Option<Duration>,
-    ) -> bool {
-        let deadline = timeout.map(|t| Instant::now() + t);
-        let parker = parking::Parker::new();
-        let unparker = parker.unparker();
-        let waker = waker_fn(move || {
-            unparker.unpark();
-        });
-        let mut cx = Context::from_waker(&waker);
-
-        loop {
-            match future.as_mut().poll(&mut cx) {
-                Poll::Ready(()) => return true,
-                Poll::Pending => {
-                    if let Some(deadline) = deadline {
-                        let now = Instant::now();
-                        if now >= deadline {
-                            return false;
-                        }
-                        parker.park_timeout(deadline - now);
-                    } else {
-                        parker.park();
-                    }
-                }
-            }
-        }
-    }
-
-    fn schedule_foreground(&self, _session_id: SessionId, runnable: Runnable<RunnableMeta>) {
-        self.dispatcher
-            .dispatch_on_main_thread(runnable, Priority::default());
-    }
-
-    fn schedule_background_with_priority(
-        &self,
-        runnable: Runnable<RunnableMeta>,
-        priority: Priority,
-    ) {
-        self.dispatcher.dispatch(runnable, priority);
-    }
-
-    fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
-        self.dispatcher.spawn_realtime(f);
-    }
-
-    fn timer(&self, duration: Duration) -> Timer {
-        use std::sync::{Arc, atomic::AtomicBool};
-
-        let (tx, rx) = oneshot::channel();
-        let dispatcher = self.dispatcher.clone();
-
-        // Create a runnable that will send the completion signal
-        let location = std::panic::Location::caller();
-        let closed = Arc::new(AtomicBool::new(false));
-        let (runnable, _task) = async_task::Builder::new()
-            .metadata(RunnableMeta { location, closed })
-            .spawn(
-                move |_| async move {
-                    let _ = tx.send(());
-                },
-                move |runnable| {
-                    dispatcher.dispatch_after(duration, runnable);
-                },
-            );
-        runnable.schedule();
-
-        Timer::new(rx)
-    }
-
-    fn clock(&self) -> Arc<dyn Clock> {
-        self.clock.clone()
-    }
-
-    fn as_test(&self) -> Option<&TestScheduler> {
-        None
-    }
-}
-
-/// A production clock that uses the platform dispatcher's time.
-struct PlatformClock {
-    dispatcher: Arc<dyn PlatformDispatcher>,
-}
-
-impl Clock for PlatformClock {
-    fn utc_now(&self) -> DateTime<Utc> {
-        Utc::now()
-    }
-
-    fn now(&self) -> Instant {
-        self.dispatcher.now()
-    }
-}

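The `block` method of the deleted `PlatformScheduler` above is a poll/park loop built on the same `parking` and `waker_fn` crates. A self-contained sketch of that technique, omitting the deadline handling:

```rust
use std::future::Future;
use std::pin::pin;
use std::task::{Context, Poll};

// Poll the future on the current thread, parking whenever it is Pending and
// relying on the waker to unpark us once progress is possible.
fn block_on<F: Future>(future: F) -> F::Output {
    let mut future = pin!(future);
    let parker = parking::Parker::new();
    let unparker = parker.unparker();
    let waker = waker_fn::waker_fn(move || {
        unparker.unpark();
    });
    let mut cx = Context::from_waker(&waker);
    loop {
        match future.as_mut().poll(&mut cx) {
            Poll::Ready(output) => return output,
            Poll::Pending => parker.park(),
        }
    }
}
```
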
crates/gpui/src/profiler.rs 🔗

@@ -217,7 +217,6 @@ impl Drop for ThreadTimings {
     }
 }
 
-#[allow(dead_code)] // Used by Linux and Windows dispatchers, not macOS
 pub(crate) fn add_task_timing(timing: TaskTiming) {
     THREAD_TIMINGS.with(|timings| {
         let mut timings = timings.lock();

crates/gpui/src/queue.rs 🔗

@@ -42,9 +42,7 @@ impl<T> PriorityQueueState<T> {
 
         let mut queues = self.queues.lock();
         match priority {
-            Priority::RealtimeAudio => unreachable!(
-                "Realtime audio priority runs on a dedicated thread and is never queued"
-            ),
+            Priority::Realtime(_) => unreachable!(),
             Priority::High => queues.high_priority.push_back(item),
             Priority::Medium => queues.medium_priority.push_back(item),
             Priority::Low => queues.low_priority.push_back(item),
@@ -221,29 +219,29 @@ impl<T> PriorityQueueReceiver<T> {
             self.state.recv()?
         };
 
-        let high = P::High.weight() * !queues.high_priority.is_empty() as u32;
-        let medium = P::Medium.weight() * !queues.medium_priority.is_empty() as u32;
-        let low = P::Low.weight() * !queues.low_priority.is_empty() as u32;
+        let high = P::High.probability() * !queues.high_priority.is_empty() as u32;
+        let medium = P::Medium.probability() * !queues.medium_priority.is_empty() as u32;
+        let low = P::Low.probability() * !queues.low_priority.is_empty() as u32;
         let mut mass = high + medium + low; //%
 
         if !queues.high_priority.is_empty() {
-            let flip = self.rand.random_ratio(P::High.weight(), mass);
+            let flip = self.rand.random_ratio(P::High.probability(), mass);
             if flip {
                 return Ok(queues.high_priority.pop_front());
             }
-            mass -= P::High.weight();
+            mass -= P::High.probability();
         }
 
         if !queues.medium_priority.is_empty() {
-            let flip = self.rand.random_ratio(P::Medium.weight(), mass);
+            let flip = self.rand.random_ratio(P::Medium.probability(), mass);
             if flip {
                 return Ok(queues.medium_priority.pop_front());
             }
-            mass -= P::Medium.weight();
+            mass -= P::Medium.probability();
         }
 
         if !queues.low_priority.is_empty() {
-            let flip = self.rand.random_ratio(P::Low.weight(), mass);
+            let flip = self.rand.random_ratio(P::Low.probability(), mass);
             if flip {
                 return Ok(queues.low_priority.pop_front());
             }

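The `probability` values above act as weights in a fall-through draw: every non-empty queue contributes its weight to a total mass, the highest tier wins with chance weight/mass, and otherwise its weight is removed from the mass before the next tier is considered. A hedged, standalone sketch of that scheme (the slice-of-queues shape and the assumption of positive weights are illustrative, not the crate's actual types):

```rust
use rand::Rng;
use std::collections::VecDeque;

// Queues are ordered highest priority first; each carries a positive selection weight.
fn pop_weighted<T>(rng: &mut impl Rng, queues: &mut [(u32, VecDeque<T>)]) -> Option<T> {
    // Only queues that currently hold work contribute to the mass.
    let mut mass: u32 = queues
        .iter()
        .map(|(weight, queue)| if queue.is_empty() { 0 } else { *weight })
        .sum();
    for (weight, queue) in queues.iter_mut() {
        if queue.is_empty() {
            continue;
        }
        // Take this tier with chance weight / mass; otherwise fall through.
        if rng.random_ratio(*weight, mass) {
            return queue.pop_front();
        }
        mass -= *weight;
    }
    None
}
```

With weights like 5/3/1, a busy high-priority queue is drained most often while lower tiers still make progress, which keeps low-priority work from starving.
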
crates/gpui/src/test.rs 🔗

@@ -27,6 +27,7 @@
 //! ```
 use crate::{Entity, Subscription, TestAppContext, TestDispatcher};
 use futures::StreamExt as _;
+use rand::prelude::*;
 use smol::channel;
 use std::{
     env,
@@ -53,7 +54,7 @@ pub fn run_test(
                 eprintln!("seed = {seed}");
             }
             let result = panic::catch_unwind(|| {
-                let dispatcher = TestDispatcher::new(seed);
+                let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(seed));
                 test_fn(dispatcher, seed);
             });
 

crates/gpui/src/text_system/line_wrapper.rs 🔗

@@ -395,9 +395,10 @@ mod tests {
     use crate::{Font, FontFeatures, FontStyle, FontWeight, TestAppContext, TestDispatcher, font};
     #[cfg(target_os = "macos")]
     use crate::{TextRun, WindowTextSystem, WrapBoundary};
+    use rand::prelude::*;
 
     fn build_wrapper() -> LineWrapper {
-        let dispatcher = TestDispatcher::new(0);
+        let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(0));
         let cx = TestAppContext::build(dispatcher, None);
         let id = cx.text_system().resolve_font(&font(".ZedMono"));
         LineWrapper::new(id, px(16.), cx.text_system().platform_text_system.clone())

crates/gpui/src/window.rs 🔗

@@ -217,77 +217,19 @@ slotmap::new_key_type! {
 }
 
 thread_local! {
-    /// Fallback arena used when no app-specific arena is active.
-    /// In production, each window draw sets CURRENT_ELEMENT_ARENA to the app's arena.
     pub(crate) static ELEMENT_ARENA: RefCell<Arena> = RefCell::new(Arena::new(1024 * 1024));
-
-    /// Points to the current App's element arena during draw operations.
-    /// This allows multiple test Apps to have isolated arenas, preventing
-    /// cross-session corruption when the scheduler interleaves their tasks.
-    static CURRENT_ELEMENT_ARENA: Cell<Option<*const RefCell<Arena>>> = const { Cell::new(None) };
-}
-
-/// Allocates an element in the current arena. Uses the app-specific arena if one
-/// is active (during draw), otherwise falls back to the thread-local ELEMENT_ARENA.
-pub(crate) fn with_element_arena<R>(f: impl FnOnce(&mut Arena) -> R) -> R {
-    CURRENT_ELEMENT_ARENA.with(|current| {
-        if let Some(arena_ptr) = current.get() {
-            // SAFETY: The pointer is valid for the duration of the draw operation
-            // that set it, and we're being called during that same draw.
-            let arena_cell = unsafe { &*arena_ptr };
-            f(&mut arena_cell.borrow_mut())
-        } else {
-            ELEMENT_ARENA.with_borrow_mut(f)
-        }
-    })
-}
-
-/// RAII guard that sets CURRENT_ELEMENT_ARENA for the duration of a draw operation.
-/// When dropped, restores the previous arena (supporting nested draws).
-pub(crate) struct ElementArenaScope {
-    previous: Option<*const RefCell<Arena>>,
-}
-
-impl ElementArenaScope {
-    /// Enter a scope where element allocations use the given arena.
-    pub(crate) fn enter(arena: &RefCell<Arena>) -> Self {
-        let previous = CURRENT_ELEMENT_ARENA.with(|current| {
-            let prev = current.get();
-            current.set(Some(arena as *const RefCell<Arena>));
-            prev
-        });
-        Self { previous }
-    }
-}
-
-impl Drop for ElementArenaScope {
-    fn drop(&mut self) {
-        CURRENT_ELEMENT_ARENA.with(|current| {
-            current.set(self.previous);
-        });
-    }
 }
 
 /// Returned when the element arena has been used and so must be cleared before the next draw.
 #[must_use]
-pub struct ArenaClearNeeded {
-    arena: *const RefCell<Arena>,
-}
+pub struct ArenaClearNeeded;
 
 impl ArenaClearNeeded {
-    /// Create a new ArenaClearNeeded that will clear the given arena.
-    pub(crate) fn new(arena: &RefCell<Arena>) -> Self {
-        Self {
-            arena: arena as *const RefCell<Arena>,
-        }
-    }
-
     /// Clear the element arena.
     pub fn clear(self) {
-        // SAFETY: The arena pointer is valid because ArenaClearNeeded is created
-        // at the end of draw() and must be cleared before the next draw.
-        let arena_cell = unsafe { &*self.arena };
-        arena_cell.borrow_mut().clear();
+        ELEMENT_ARENA.with_borrow_mut(|element_arena| {
+            element_arena.clear();
+        });
     }
 }
 
@@ -2133,10 +2075,6 @@ impl Window {
     /// the contents of the new [`Scene`], use [`Self::present`].
     #[profiling::function]
     pub fn draw(&mut self, cx: &mut App) -> ArenaClearNeeded {
-        // Set up the per-App arena for element allocation during this draw.
-        // This ensures that multiple test Apps have isolated arenas.
-        let _arena_scope = ElementArenaScope::enter(&cx.element_arena);
-
         self.invalidate_entities();
         cx.entities.clear_accessed();
         debug_assert!(self.rendered_entity_stack.is_empty());
@@ -2204,7 +2142,7 @@ impl Window {
         self.invalidator.set_phase(DrawPhase::None);
         self.needs_present.set(true);
 
-        ArenaClearNeeded::new(&cx.element_arena)
+        ArenaClearNeeded
     }
 
     fn record_entities_accessed(&mut self, cx: &mut App) {

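The restored code above pairs a thread-local `ELEMENT_ARENA` with a `#[must_use]` `ArenaClearNeeded` token so each draw is obliged to clear the arena before the next one. A loose sketch of that shape, with a plain `Vec<u8>` standing in for gpui's `Arena` and hypothetical names throughout:

```rust
use std::cell::RefCell;

thread_local! {
    // Per-thread scratch storage reused across frames (stand-in for the element arena).
    static FRAME_SCRATCH: RefCell<Vec<u8>> = RefCell::new(Vec::with_capacity(1024));
}

// Returned by draw_frame(); #[must_use] forces callers to clear before drawing again.
#[must_use]
struct ScratchClearNeeded;

impl ScratchClearNeeded {
    fn clear(self) {
        FRAME_SCRATCH.with_borrow_mut(|scratch| scratch.clear());
    }
}

fn draw_frame() -> ScratchClearNeeded {
    FRAME_SCRATCH.with_borrow_mut(|scratch| scratch.extend_from_slice(b"frame contents"));
    ScratchClearNeeded
}

fn main() {
    let clear_needed = draw_frame();
    // ... present the frame ...
    clear_needed.clear();
}
```
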
crates/gpui_macros/src/test.rs 🔗

@@ -191,9 +191,9 @@ fn generate_test_function(
                     &[#seeds],
                     #max_retries,
                     &mut |dispatcher, _seed| {
-                        let foreground_executor = gpui::ForegroundExecutor::new(std::sync::Arc::new(dispatcher.clone()));
+                        let executor = gpui::BackgroundExecutor::new(std::sync::Arc::new(dispatcher.clone()));
                         #cx_vars
-                        foreground_executor.block_test(#inner_fn_name(#inner_fn_args));
+                        executor.block_test(#inner_fn_name(#inner_fn_args));
                         #cx_teardowns
                     },
                     #on_failure_fn_name

crates/language/src/buffer.rs 🔗

@@ -30,7 +30,7 @@ use fs::MTime;
 use futures::channel::oneshot;
 use gpui::{
     App, AppContext as _, Context, Entity, EventEmitter, HighlightStyle, SharedString, StyledText,
-    Task, TextStyle,
+    Task, TaskLabel, TextStyle,
 };
 
 use lsp::{LanguageServerId, NumberOrString};
@@ -53,7 +53,7 @@ use std::{
     ops::{Deref, Range},
     path::PathBuf,
     rc,
-    sync::Arc,
+    sync::{Arc, LazyLock},
     time::{Duration, Instant},
     vec,
 };
@@ -76,6 +76,10 @@ pub use {tree_sitter_python, tree_sitter_rust, tree_sitter_typescript};
 
 pub use lsp::DiagnosticSeverity;
 
+/// A label for the background task spawned by the buffer to compute
+/// a diff against the contents of its file.
+pub static BUFFER_DIFF_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
+
 /// Indicate whether a [`Buffer`] has permissions to edit.
 #[derive(PartialEq, Clone, Copy, Debug)]
 pub enum Capability {
@@ -1888,7 +1892,7 @@ impl Buffer {
         if let Some(indent_sizes) = self.compute_autoindents() {
             let indent_sizes = cx.background_spawn(indent_sizes);
             match cx
-                .foreground_executor()
+                .background_executor()
                 .block_with_timeout(block_budget, indent_sizes)
             {
                 Ok(indent_sizes) => self.apply_autoindents(indent_sizes, cx),
@@ -2147,17 +2151,18 @@ impl Buffer {
     pub fn diff(&self, mut new_text: String, cx: &App) -> Task<Diff> {
         let old_text = self.as_rope().clone();
         let base_version = self.version();
-        cx.background_spawn(async move {
-            let old_text = old_text.to_string();
-            let line_ending = LineEnding::detect(&new_text);
-            LineEnding::normalize(&mut new_text);
-            let edits = text_diff(&old_text, &new_text);
-            Diff {
-                base_version,
-                line_ending,
-                edits,
-            }
-        })
+        cx.background_executor()
+            .spawn_labeled(*BUFFER_DIFF_TASK, async move {
+                let old_text = old_text.to_string();
+                let line_ending = LineEnding::detect(&new_text);
+                LineEnding::normalize(&mut new_text);
+                let edits = text_diff(&old_text, &new_text);
+                Diff {
+                    base_version,
+                    line_ending,
+                    edits,
+                }
+            })
     }
 
     /// Spawns a background task that searches the buffer for any whitespace

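`BUFFER_DIFF_TASK` above gives the diff computation a stable identity so tests can deprioritize it and reproduce "file changed again before the diff finished" races. A minimal sketch of the labelled-task pattern; `SLOW_RECOMPUTE` and `recompute` are hypothetical names, while `TaskLabel`, `spawn_labeled`, and `deprioritize` are the gpui APIs used above:

```rust
use gpui::{App, AppContext as _, Task, TaskLabel};
use std::sync::LazyLock;

// A label identifies this family of background tasks to the test dispatcher.
pub static SLOW_RECOMPUTE: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);

fn recompute(input: String, cx: &App) -> Task<usize> {
    cx.background_executor()
        .spawn_labeled(*SLOW_RECOMPUTE, async move { input.len() })
}

// In a test, run everything else first so the labelled work completes late:
// cx.executor().deprioritize(*SLOW_RECOMPUTE);
```
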
crates/language/src/buffer_tests.rs 🔗

@@ -2962,8 +2962,8 @@ fn test_serialization(cx: &mut gpui::App) {
 
     let state = buffer1.read(cx).to_proto(cx);
     let ops = cx
-        .foreground_executor()
-        .block_on(buffer1.read(cx).serialize_ops(None, cx));
+        .background_executor()
+        .block(buffer1.read(cx).serialize_ops(None, cx));
     let buffer2 = cx.new(|cx| {
         let mut buffer =
             Buffer::from_proto(ReplicaId::new(1), Capability::ReadWrite, state, None).unwrap();
@@ -3300,8 +3300,8 @@ fn test_random_collaboration(cx: &mut App, mut rng: StdRng) {
         let buffer = cx.new(|cx| {
             let state = base_buffer.read(cx).to_proto(cx);
             let ops = cx
-                .foreground_executor()
-                .block_on(base_buffer.read(cx).serialize_ops(None, cx));
+                .background_executor()
+                .block(base_buffer.read(cx).serialize_ops(None, cx));
             let mut buffer =
                 Buffer::from_proto(ReplicaId::new(i as u16), Capability::ReadWrite, state, None)
                     .unwrap();
@@ -3415,8 +3415,8 @@ fn test_random_collaboration(cx: &mut App, mut rng: StdRng) {
             50..=59 if replica_ids.len() < max_peers => {
                 let old_buffer_state = buffer.read(cx).to_proto(cx);
                 let old_buffer_ops = cx
-                    .foreground_executor()
-                    .block_on(buffer.read(cx).serialize_ops(None, cx));
+                    .background_executor()
+                    .block(buffer.read(cx).serialize_ops(None, cx));
                 let new_replica_id = (0..=replica_ids.len() as u16)
                     .map(ReplicaId::new)
                     .filter(|replica_id| *replica_id != buffer.read(cx).replica_id())

crates/language/src/language_registry.rs 🔗

@@ -496,11 +496,6 @@ impl LanguageRegistry {
         servers_rx
     }
 
-    #[cfg(any(feature = "test-support", test))]
-    pub fn has_fake_lsp_server(&self, lsp_name: &LanguageServerName) -> bool {
-        self.state.read().fake_server_entries.contains_key(lsp_name)
-    }
-
     /// Adds a language to the registry, which can be loaded if needed.
     pub fn register_language(
         &self,
@@ -1138,9 +1133,10 @@ impl LanguageRegistry {
         binary: lsp::LanguageServerBinary,
         cx: &mut gpui::AsyncApp,
     ) -> Option<lsp::LanguageServer> {
+        use gpui::AppContext as _;
+
         let mut state = self.state.write();
         let fake_entry = state.fake_server_entries.get_mut(name)?;
-
         let (server, mut fake_server) = lsp::FakeLanguageServer::new(
             server_id,
             binary,
@@ -1154,9 +1150,17 @@ impl LanguageRegistry {
             initializer(&mut fake_server);
         }
 
-        // Emit synchronously so tests can reliably observe server creation even if the LSP startup
-        // task hasn't progressed to initialization yet.
-        fake_entry.tx.unbounded_send(fake_server).ok();
+        let tx = fake_entry.tx.clone();
+        cx.background_spawn(async move {
+            if fake_server
+                .try_receive_notification::<lsp::notification::Initialized>()
+                .await
+                .is_some()
+            {
+                tx.unbounded_send(fake_server.clone()).ok();
+            }
+        })
+        .detach();
 
         Some(server)
     }

crates/language_models/src/provider/mistral.rs 🔗

@@ -48,17 +48,18 @@ pub struct State {
     codestral_api_key_state: Entity<ApiKeyState>,
 }
 
+struct CodestralApiKey(Entity<ApiKeyState>);
+impl Global for CodestralApiKey {}
+
 pub fn codestral_api_key(cx: &mut App) -> Entity<ApiKeyState> {
-    // IMPORTANT:
-    // Do not store `Entity<T>` handles in process-wide statics (e.g. `OnceLock`).
-    //
-    // `Entity<T>` is tied to a particular `App`/entity-map context. Caching it globally can
-    // cause panics like "used a entity with the wrong context" when tests (or multiple apps)
-    // create distinct `App` instances in the same process.
-    //
-    // If we want a per-process singleton, store plain data (e.g. env var names) and create
-    // the entity per-App instead.
-    cx.new(|_| ApiKeyState::new(CODESTRAL_API_URL.into(), CODESTRAL_API_KEY_ENV_VAR.clone()))
+    if cx.has_global::<CodestralApiKey>() {
+        cx.global::<CodestralApiKey>().0.clone()
+    } else {
+        let api_key_state = cx
+            .new(|_| ApiKeyState::new(CODESTRAL_API_URL.into(), CODESTRAL_API_KEY_ENV_VAR.clone()));
+        cx.set_global(CodestralApiKey(api_key_state.clone()));
+        api_key_state
+    }
 }
 
 impl State {

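The restored `codestral_api_key` above caches an `Entity` handle in an App-scoped global rather than a process-wide static, creating it lazily on first access. A hedged sketch of that pattern with hypothetical names (`CachedCounter`, `counter`):

```rust
use gpui::{App, AppContext as _, Entity, Global};

// Newtype wrapper so the entity handle can live in App-scoped global storage.
struct CachedCounter(Entity<u32>);
impl Global for CachedCounter {}

// Create the entity once per App, then hand out clones of the same handle.
fn counter(cx: &mut App) -> Entity<u32> {
    if cx.has_global::<CachedCounter>() {
        cx.global::<CachedCounter>().0.clone()
    } else {
        let entity = cx.new(|_| 0u32);
        cx.set_global(CachedCounter(entity.clone()));
        entity
    }
}
```
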
crates/language_models/src/provider/open_ai.rs 🔗

@@ -1419,8 +1419,8 @@ mod tests {
         // Validate that all models are supported by tiktoken-rs
         for model in Model::iter() {
             let count = cx
-                .foreground_executor()
-                .block_on(count_open_ai_tokens(
+                .executor()
+                .block(count_open_ai_tokens(
                     request.clone(),
                     model,
                     &cx.app.borrow(),

crates/livekit_client/src/livekit_client/playback/source.rs 🔗

@@ -47,14 +47,17 @@ impl LiveKitStream {
         );
         let (queue_input, queue_output) = rodio::queue::queue(true);
         // spawn rtc stream
-        let receiver_task = executor.spawn_with_priority(gpui::Priority::RealtimeAudio, {
-            async move {
-                while let Some(frame) = stream.next().await {
-                    let samples = frame_to_samplesbuffer(frame);
-                    queue_input.append(samples);
+        let receiver_task = executor.spawn_with_priority(
+            gpui::Priority::Realtime(gpui::RealtimePriority::Audio),
+            {
+                async move {
+                    while let Some(frame) = stream.next().await {
+                        let samples = frame_to_samplesbuffer(frame);
+                        queue_input.append(samples);
+                    }
                 }
-            }
-        });
+            },
+        );
 
         LiveKitStream {
             _receiver_task: receiver_task,

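The call above runs the audio receive loop at realtime priority so frame delivery is not starved by ordinary background work. A hedged sketch of the same call shape, assuming `executor` is a gpui `BackgroundExecutor` and substituting a plain channel plus a hypothetical `play` callback for the LiveKit stream and playback queue:

```rust
use futures::StreamExt as _;
use futures::channel::mpsc;
use gpui::{BackgroundExecutor, Priority, RealtimePriority, Task};

// Forward decoded audio frames on a realtime-priority task.
fn spawn_audio_pump(
    executor: &BackgroundExecutor,
    mut frames: mpsc::UnboundedReceiver<Vec<f32>>,
    play: impl Fn(Vec<f32>) + Send + 'static,
) -> Task<()> {
    executor.spawn_with_priority(
        Priority::Realtime(RealtimePriority::Audio),
        async move {
            while let Some(frame) = frames.next().await {
                play(frame);
            }
        },
    )
}
```
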
crates/lsp/src/lsp.rs 🔗

@@ -1746,11 +1746,13 @@ impl FakeLanguageServer {
         T: request::Request,
         T::Result: 'static + Send,
     {
+        self.server.executor.start_waiting();
         self.server.request::<T>(params).await
     }
 
     /// Attempts [`Self::try_receive_notification`], unwrapping if it has not received the specified type yet.
     pub async fn receive_notification<T: notification::Notification>(&mut self) -> T::Params {
+        self.server.executor.start_waiting();
         self.try_receive_notification::<T>().await.unwrap()
     }
 

crates/miniprofiler_ui/src/miniprofiler_ui.rs 🔗

@@ -125,7 +125,7 @@ impl ProfilerWindow {
             loop {
                 let data = cx
                     .foreground_executor()
-                    .dispatcher()
+                    .dispatcher
                     .get_current_thread_timings();
 
                 this.update(cx, |this: &mut ProfilerWindow, cx| {

crates/multi_buffer/src/multi_buffer_tests.rs 🔗

@@ -78,8 +78,8 @@ fn test_remote(cx: &mut App) {
     let guest_buffer = cx.new(|cx| {
         let state = host_buffer.read(cx).to_proto(cx);
         let ops = cx
-            .foreground_executor()
-            .block_on(host_buffer.read(cx).serialize_ops(None, cx));
+            .background_executor()
+            .block(host_buffer.read(cx).serialize_ops(None, cx));
         let mut buffer =
             Buffer::from_proto(ReplicaId::REMOTE_SERVER, Capability::ReadWrite, state, None)
                 .unwrap();

crates/project/src/project_settings.rs 🔗

@@ -1182,7 +1182,7 @@ impl SettingsObserver {
     ) -> Task<()> {
         let mut user_tasks_file_rx =
             watch_config_file(cx.background_executor(), fs, file_path.clone());
-        let user_tasks_content = cx.foreground_executor().block_on(user_tasks_file_rx.next());
+        let user_tasks_content = cx.background_executor().block(user_tasks_file_rx.next());
         let weak_entry = cx.weak_entity();
         cx.spawn(async move |settings_observer, cx| {
             let Ok(task_store) = settings_observer.read_with(cx, |settings_observer, _| {
@@ -1233,7 +1233,7 @@ impl SettingsObserver {
     ) -> Task<()> {
         let mut user_tasks_file_rx =
             watch_config_file(cx.background_executor(), fs, file_path.clone());
-        let user_tasks_content = cx.foreground_executor().block_on(user_tasks_file_rx.next());
+        let user_tasks_content = cx.background_executor().block(user_tasks_file_rx.next());
         let weak_entry = cx.weak_entity();
         cx.spawn(async move |settings_observer, cx| {
             let Ok(task_store) = settings_observer.read_with(cx, |settings_observer, _| {

crates/project/src/project_tests.rs 🔗

@@ -9,7 +9,8 @@ use crate::{
 };
 use async_trait::async_trait;
 use buffer_diff::{
-    BufferDiffEvent, DiffHunkSecondaryStatus, DiffHunkStatus, DiffHunkStatusKind, assert_hunks,
+    BufferDiffEvent, CALCULATE_DIFF_TASK, DiffHunkSecondaryStatus, DiffHunkStatus,
+    DiffHunkStatusKind, assert_hunks,
 };
 use fs::FakeFs;
 use futures::{StreamExt, future};
@@ -210,8 +211,8 @@ async fn test_editorconfig_support(cx: &mut gpui::TestAppContext) {
                 .languages()
                 .load_language_for_file_path(file.path.as_std_path());
             let file_language = cx
-                .foreground_executor()
-                .block_on(file_language)
+                .background_executor()
+                .block(file_language)
                 .expect("Failed to get file language");
             let file = file as _;
             language_settings(Some(file_language.name()), Some(&file), cx).into_owned()
@@ -1461,7 +1462,6 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon
     let prev_read_dir_count = fs.read_dir_call_count();
 
     let fake_server = fake_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
     let server_id = lsp_store.read_with(cx, |lsp_store, _| {
         let (id, _) = lsp_store.language_server_statuses().next().unwrap();
         id
@@ -2077,7 +2077,6 @@ async fn test_restarting_server_with_diagnostics_running(cx: &mut gpui::TestAppC
     let buffer_id = buffer.read_with(cx, |buffer, _| buffer.remote_id());
     // Simulate diagnostics starting to update.
     let fake_server = fake_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
     fake_server.start_progress(progress_token).await;
 
     // Restart the server before the diagnostics finish updating.
@@ -2088,7 +2087,6 @@ async fn test_restarting_server_with_diagnostics_running(cx: &mut gpui::TestAppC
 
     // Simulate the newly started server sending more diagnostics.
     let fake_server = fake_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
     assert_eq!(
         events.next().await.unwrap(),
         Event::LanguageServerRemoved(LanguageServerId(0))
@@ -3355,8 +3353,6 @@ async fn test_definition(cx: &mut gpui::TestAppContext) {
         .unwrap();
 
     let fake_server = fake_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
-
     fake_server.set_request_handler::<lsp::request::GotoDefinition, _, _>(|params, _| async move {
         let params = params.text_document_position_params;
         assert_eq!(
@@ -3467,7 +3463,6 @@ async fn test_completions_with_text_edit(cx: &mut gpui::TestAppContext) {
         .unwrap();
 
     let fake_server = fake_language_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
 
     // When text_edit exists, it takes precedence over insert_text and label
     let text = "let a = obj.fqn";
@@ -3551,7 +3546,6 @@ async fn test_completions_with_edit_ranges(cx: &mut gpui::TestAppContext) {
         .unwrap();
 
     let fake_server = fake_language_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
     let text = "let a = obj.fqn";
 
     // Test 1: When text_edit is None but text_edit_text exists with default edit_range
@@ -3689,7 +3683,6 @@ async fn test_completions_without_edit_ranges(cx: &mut gpui::TestAppContext) {
         .unwrap();
 
     let fake_server = fake_language_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
 
     // Test 1: When text_edit is None but insert_text exists (no edit_range in defaults)
     let text = "let a = b.fqn";
@@ -3796,7 +3789,6 @@ async fn test_completions_with_carriage_returns(cx: &mut gpui::TestAppContext) {
         .unwrap();
 
     let fake_server = fake_language_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
 
     let text = "let a = b.fqn";
     buffer.update(cx, |buffer, cx| buffer.set_text(text, cx));
@@ -3871,7 +3863,6 @@ async fn test_apply_code_actions_with_commands(cx: &mut gpui::TestAppContext) {
         .unwrap();
 
     let fake_server = fake_language_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
 
     // Language server returns code actions that contain commands, and not edits.
     let actions = project.update(cx, |project, cx| {
@@ -4213,6 +4204,10 @@ async fn test_file_changes_multiple_times_on_disk(cx: &mut gpui::TestAppContext)
         .await
         .unwrap();
 
+    // Simulate buffer diffs being slow, so that they don't complete before
+    // the next file change occurs.
+    cx.executor().deprioritize(*language::BUFFER_DIFF_TASK);
+
     // Change the buffer's file on disk, and then wait for the file change
     // to be detected by the worktree, so that the buffer starts reloading.
     fs.save(
@@ -4264,6 +4259,10 @@ async fn test_edit_buffer_while_it_reloads(cx: &mut gpui::TestAppContext) {
         .await
         .unwrap();
 
+    // Simulate buffer diffs being slow, so that they don't complete before
+    // the next file change occurs.
+    cx.executor().deprioritize(*language::BUFFER_DIFF_TASK);
+
     // Change the buffer's file on disk, and then wait for the file change
     // to be detected by the worktree, so that the buffer starts reloading.
     fs.save(
@@ -5381,7 +5380,6 @@ async fn test_lsp_rename_notifications(cx: &mut gpui::TestAppContext) {
         .unwrap();
 
     let fake_server = fake_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
     let response = project.update(cx, |project, cx| {
         let worktree = project.worktrees(cx).next().unwrap();
         let entry = worktree
@@ -5493,7 +5491,6 @@ async fn test_rename(cx: &mut gpui::TestAppContext) {
         .unwrap();
 
     let fake_server = fake_servers.next().await.unwrap();
-    cx.executor().run_until_parked();
 
     let response = project.update(cx, |project, cx| {
         project.prepare_rename(buffer.clone(), 7, cx)
@@ -7997,13 +7994,18 @@ async fn test_staging_hunks_with_delayed_fs_event(cx: &mut gpui::TestAppContext)
 #[gpui::test(iterations = 25)]
 async fn test_staging_random_hunks(
     mut rng: StdRng,
-    _executor: BackgroundExecutor,
+    executor: BackgroundExecutor,
     cx: &mut gpui::TestAppContext,
 ) {
     let operations = env::var("OPERATIONS")
         .map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
         .unwrap_or(20);
 
+    // Try to induce races between diff recalculation and index writes.
+    if rng.random_bool(0.5) {
+        executor.deprioritize(*CALCULATE_DIFF_TASK);
+    }
+
     use DiffHunkSecondaryStatus::*;
     init_test(cx);
 

crates/project_panel/src/project_panel.rs 🔗

@@ -62,11 +62,7 @@ use ui::{
     ScrollAxes, ScrollableHandle, Scrollbars, StickyCandidate, Tooltip, WithScrollbar, prelude::*,
     v_flex,
 };
-use util::{
-    ResultExt, TakeUntilExt, TryFutureExt, maybe,
-    paths::compare_paths,
-    rel_path::{RelPath, RelPathBuf},
-};
+use util::{ResultExt, TakeUntilExt, TryFutureExt, maybe, paths::compare_paths, rel_path::RelPath};
 use workspace::{
     DraggedSelection, OpenInTerminal, OpenOptions, OpenVisible, PreviewTabsSettings, SelectedEntry,
     SplitDirection, Workspace,
@@ -3216,14 +3212,13 @@ impl ProjectPanel {
         destination: ProjectEntryId,
         destination_is_file: bool,
         cx: &mut Context<Self>,
-    ) -> Option<Task<Result<CreatedEntry>>> {
+    ) {
         if self
             .project
             .read(cx)
             .entry_is_worktree_root(entry_to_move, cx)
         {
-            self.move_worktree_root(entry_to_move, destination, cx);
-            None
+            self.move_worktree_root(entry_to_move, destination, cx)
         } else {
             self.move_worktree_entry(entry_to_move, destination, destination_is_file, cx)
         }
@@ -3258,53 +3253,38 @@ impl ProjectPanel {
         destination_entry: ProjectEntryId,
         destination_is_file: bool,
         cx: &mut Context<Self>,
-    ) -> Option<Task<Result<CreatedEntry>>> {
+    ) {
         if entry_to_move == destination_entry {
-            return None;
+            return;
         }
 
-        let (destination_worktree, rename_task) = self.project.update(cx, |project, cx| {
-            let Some(source_path) = project.path_for_entry(entry_to_move, cx) else {
-                return (None, None);
-            };
-            let Some(destination_path) = project.path_for_entry(destination_entry, cx) else {
-                return (None, None);
-            };
+        let destination_worktree = self.project.update(cx, |project, cx| {
+            let source_path = project.path_for_entry(entry_to_move, cx)?;
+            let destination_path = project.path_for_entry(destination_entry, cx)?;
             let destination_worktree_id = destination_path.worktree_id;
 
-            let destination_dir = if destination_is_file {
-                destination_path.path.parent().unwrap_or(RelPath::empty())
-            } else {
-                destination_path.path.as_ref()
-            };
-
-            let Some(source_name) = source_path.path.file_name() else {
-                return (None, None);
-            };
-            let Ok(source_name) = RelPath::unix(source_name) else {
-                return (None, None);
-            };
+            let mut destination_path = destination_path.path.as_ref();
+            if destination_is_file {
+                destination_path = destination_path.parent()?;
+            }
 
-            let mut new_path = destination_dir.to_rel_path_buf();
-            new_path.push(source_name);
-            let rename_task = (new_path.as_rel_path() != source_path.path.as_ref()).then(|| {
-                project.rename_entry(
+            let mut new_path = destination_path.to_rel_path_buf();
+            new_path.push(RelPath::unix(source_path.path.file_name()?).unwrap());
+            if new_path.as_rel_path() != source_path.path.as_ref() {
+                let task = project.rename_entry(
                     entry_to_move,
                     (destination_worktree_id, new_path).into(),
                     cx,
-                )
-            });
+                );
+                cx.foreground_executor().spawn(task).detach_and_log_err(cx);
+            }
 
-            (
-                project.worktree_id_for_entry(destination_entry, cx),
-                rename_task,
-            )
+            project.worktree_id_for_entry(destination_entry, cx)
         });
 
         if let Some(destination_worktree) = destination_worktree {
             self.expand_entry(destination_worktree, destination_entry, cx);
         }
-        rename_task
     }
 
     fn index_for_selection(&self, selection: SelectedEntry) -> Option<(usize, usize, usize)> {
@@ -4015,122 +3995,8 @@ impl ProjectPanel {
                 Some(())
             });
         } else {
-            let update_marks = !self.marked_entries.is_empty();
-            let active_selection = selections.active_selection;
-
-            // For folded selections, track the leaf suffix relative to the resolved
-            // entry so we can refresh it after the move completes.
-            let (folded_selection_info, folded_selection_entries): (
-                Vec<(ProjectEntryId, RelPathBuf)>,
-                HashSet<SelectedEntry>,
-            ) = {
-                let project = self.project.read(cx);
-                let mut info = Vec::new();
-                let mut folded_entries = HashSet::default();
-
-                for selection in selections.items() {
-                    let resolved_id = self.resolve_entry(selection.entry_id);
-                    if resolved_id == selection.entry_id {
-                        continue;
-                    }
-                    folded_entries.insert(*selection);
-                    let Some(source_path) = project.path_for_entry(resolved_id, cx) else {
-                        continue;
-                    };
-                    let Some(leaf_path) = project.path_for_entry(selection.entry_id, cx) else {
-                        continue;
-                    };
-                    let Ok(suffix) = leaf_path.path.strip_prefix(source_path.path.as_ref()) else {
-                        continue;
-                    };
-                    if suffix.as_unix_str().is_empty() {
-                        continue;
-                    }
-
-                    info.push((resolved_id, suffix.to_rel_path_buf()));
-                }
-                (info, folded_entries)
-            };
-
-            // Collect move tasks paired with their source entry ID so we can correlate
-            // results with folded selections that need refreshing.
-            let mut move_tasks: Vec<(ProjectEntryId, Task<Result<CreatedEntry>>)> = Vec::new();
             for entry in entries {
-                if let Some(task) = self.move_entry(entry.entry_id, target_entry_id, is_file, cx) {
-                    move_tasks.push((entry.entry_id, task));
-                }
-            }
-
-            if move_tasks.is_empty() {
-                return;
-            }
-
-            if folded_selection_info.is_empty() {
-                for (_, task) in move_tasks {
-                    task.detach_and_log_err(cx);
-                }
-            } else {
-                cx.spawn_in(window, async move |project_panel, cx| {
-                    // Await all move tasks and collect successful results
-                    let mut move_results: Vec<(ProjectEntryId, Entry)> = Vec::new();
-                    for (entry_id, task) in move_tasks {
-                        if let Some(CreatedEntry::Included(new_entry)) = task.await.log_err() {
-                            move_results.push((entry_id, new_entry));
-                        }
-                    }
-
-                    if move_results.is_empty() {
-                        return;
-                    }
-
-                    // For folded selections, we need to refresh the leaf paths (with suffixes)
-                    // because they may not be indexed yet after the parent directory was moved.
-                    // First collect the paths to refresh, then refresh them.
-                    let paths_to_refresh: Vec<(Entity<Worktree>, Arc<RelPath>)> = project_panel
-                        .update(cx, |project_panel, cx| {
-                            let project = project_panel.project.read(cx);
-                            folded_selection_info
-                                .iter()
-                                .filter_map(|(resolved_id, suffix)| {
-                                    let (_, new_entry) =
-                                        move_results.iter().find(|(id, _)| id == resolved_id)?;
-                                    let worktree = project.worktree_for_entry(new_entry.id, cx)?;
-                                    let leaf_path = new_entry.path.join(suffix);
-                                    Some((worktree, leaf_path))
-                                })
-                                .collect()
-                        })
-                        .ok()
-                        .unwrap_or_default();
-
-                    let refresh_tasks: Vec<_> = paths_to_refresh
-                        .into_iter()
-                        .filter_map(|(worktree, leaf_path)| {
-                            worktree.update(cx, |worktree, cx| {
-                                worktree
-                                    .as_local_mut()
-                                    .map(|local| local.refresh_entry(leaf_path, None, cx))
-                            })
-                        })
-                        .collect();
-
-                    for task in refresh_tasks {
-                        task.await.log_err();
-                    }
-
-                    if update_marks && !folded_selection_entries.is_empty() {
-                        project_panel
-                            .update(cx, |project_panel, cx| {
-                                project_panel.marked_entries.retain(|entry| {
-                                    !folded_selection_entries.contains(entry)
-                                        || *entry == active_selection
-                                });
-                                cx.notify();
-                            })
-                            .ok();
-                    }
-                })
-                .detach();
+                self.move_entry(entry.entry_id, target_entry_id, is_file, cx);
             }
         }
     }

crates/project_symbols/src/project_symbols.rs 🔗

@@ -63,7 +63,7 @@ impl ProjectSymbolsDelegate {
 
     fn filter(&mut self, query: &str, window: &mut Window, cx: &mut Context<Picker<Self>>) {
         const MAX_MATCHES: usize = 100;
-        let mut visible_matches = cx.foreground_executor().block_on(fuzzy::match_strings(
+        let mut visible_matches = cx.background_executor().block(fuzzy::match_strings(
             &self.visible_match_candidates,
             query,
             false,
@@ -72,7 +72,7 @@ impl ProjectSymbolsDelegate {
             &Default::default(),
             cx.background_executor().clone(),
         ));
-        let mut external_matches = cx.foreground_executor().block_on(fuzzy::match_strings(
+        let mut external_matches = cx.background_executor().block(fuzzy::match_strings(
             &self.external_match_candidates,
             query,
             false,

crates/remote_server/src/unix.rs 🔗

@@ -929,8 +929,8 @@ pub fn handle_settings_file_changes(
     settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
 ) {
     let server_settings_content = cx
-        .foreground_executor()
-        .block_on(server_settings_file.next())
+        .background_executor()
+        .block(server_settings_file.next())
         .unwrap();
     SettingsStore::update_global(cx, |store, cx| {
         store

crates/repl/Cargo.toml 🔗

@@ -16,7 +16,6 @@ doctest = false
 alacritty_terminal.workspace = true
 anyhow.workspace = true
 async-dispatcher.workspace = true
-async-task.workspace = true
 async-tungstenite = { workspace = true, features = ["tokio", "tokio-rustls-manual-roots", "tokio-runtime"] }
 base64.workspace = true
 client.workspace = true

crates/repl/src/repl.rs 🔗

@@ -12,7 +12,7 @@ mod session;
 use std::{sync::Arc, time::Duration};
 
 use async_dispatcher::{Dispatcher, Runnable, set_dispatcher};
-use gpui::{App, PlatformDispatcher, Priority, RunnableMeta};
+use gpui::{App, PlatformDispatcher, Priority, RunnableVariant};
 use project::Fs;
 pub use runtimelib::ExecutionState;
 
@@ -44,38 +44,18 @@ fn zed_dispatcher(cx: &mut App) -> impl Dispatcher {
     // just make that consistent so we have this dispatcher ready to go for
     // other crates in Zed.
     impl Dispatcher for ZedDispatcher {
-        #[track_caller]
         fn dispatch(&self, runnable: Runnable) {
-            use std::sync::{Arc, atomic::AtomicBool};
-            let location = core::panic::Location::caller();
-            let closed = Arc::new(AtomicBool::new(false));
-            let (wrapper, task) = async_task::Builder::new()
-                .metadata(RunnableMeta { location, closed })
-                .spawn(|_| async move { runnable.run() }, {
-                    let dispatcher = self.dispatcher.clone();
-                    move |r| dispatcher.dispatch(r, Priority::default())
-                });
-            wrapper.schedule();
-            task.detach();
+            self.dispatcher
+                .dispatch(RunnableVariant::Compat(runnable), None, Priority::default());
         }
 
-        #[track_caller]
         fn dispatch_after(&self, duration: Duration, runnable: Runnable) {
-            use std::sync::{Arc, atomic::AtomicBool};
-            let location = core::panic::Location::caller();
-            let closed = Arc::new(AtomicBool::new(false));
-            let (wrapper, task) = async_task::Builder::new()
-                .metadata(RunnableMeta { location, closed })
-                .spawn(|_| async move { runnable.run() }, {
-                    let dispatcher = self.dispatcher.clone();
-                    move |r| dispatcher.dispatch_after(duration, r)
-                });
-            wrapper.schedule();
-            task.detach();
+            self.dispatcher
+                .dispatch_after(duration, RunnableVariant::Compat(runnable));
         }
     }
 
     ZedDispatcher {
-        dispatcher: cx.background_executor().dispatcher().clone(),
+        dispatcher: cx.background_executor().dispatcher.clone(),
     }
 }

crates/scheduler/Cargo.toml 🔗

@@ -19,7 +19,6 @@ test-support = []
 async-task.workspace = true
 backtrace.workspace = true
 chrono.workspace = true
-flume = "0.11"
 futures.workspace = true
 parking_lot.workspace = true
 rand.workspace = true

crates/scheduler/src/executor.rs 🔗

@@ -1,4 +1,5 @@
-use crate::{Priority, RunnableMeta, Scheduler, SessionId, Timer};
+use crate::{Scheduler, SessionId, Timer};
+use futures::FutureExt as _;
 use std::{
     future::Future,
     marker::PhantomData,
@@ -6,20 +7,16 @@ use std::{
     panic::Location,
     pin::Pin,
     rc::Rc,
-    sync::{
-        Arc,
-        atomic::{AtomicBool, Ordering},
-    },
+    sync::Arc,
     task::{Context, Poll},
     thread::{self, ThreadId},
-    time::{Duration, Instant},
+    time::Duration,
 };
 
 #[derive(Clone)]
 pub struct ForegroundExecutor {
     session_id: SessionId,
     scheduler: Arc<dyn Scheduler>,
-    closed: Arc<AtomicBool>,
     not_send: PhantomData<Rc<()>>,
 }
 
@@ -28,29 +25,10 @@ impl ForegroundExecutor {
         Self {
             session_id,
             scheduler,
-            closed: Arc::new(AtomicBool::new(false)),
             not_send: PhantomData,
         }
     }
 
-    pub fn session_id(&self) -> SessionId {
-        self.session_id
-    }
-
-    pub fn scheduler(&self) -> &Arc<dyn Scheduler> {
-        &self.scheduler
-    }
-
-    /// Returns the closed flag for this executor.
-    pub fn closed(&self) -> &Arc<AtomicBool> {
-        &self.closed
-    }
-
-    /// Close this executor. Tasks will not run after this is called.
-    pub fn close(&self) {
-        self.closed.store(true, Ordering::SeqCst);
-    }
-
     #[track_caller]
     pub fn spawn<F>(&self, future: F) -> Task<F::Output>
     where
@@ -59,154 +37,61 @@ impl ForegroundExecutor {
     {
         let session_id = self.session_id;
         let scheduler = Arc::clone(&self.scheduler);
-        let location = Location::caller();
-        let closed = self.closed.clone();
-        let (runnable, task) = spawn_local_with_source_location(
-            future,
-            move |runnable| {
-                scheduler.schedule_foreground(session_id, runnable);
-            },
-            RunnableMeta { location, closed },
-        );
+        let (runnable, task) = spawn_local_with_source_location(future, move |runnable| {
+            scheduler.schedule_foreground(session_id, runnable);
+        });
         runnable.schedule();
         Task(TaskState::Spawned(task))
     }
 
     pub fn block_on<Fut: Future>(&self, future: Fut) -> Fut::Output {
-        use std::cell::Cell;
-
-        let output = Cell::new(None);
-        let future = async {
-            output.set(Some(future.await));
-        };
-        let mut future = std::pin::pin!(future);
-
-        self.scheduler
-            .block(Some(self.session_id), future.as_mut(), None);
-
-        output.take().expect("block_on future did not complete")
+        let mut output = None;
+        self.scheduler.block(
+            Some(self.session_id),
+            async { output = Some(future.await) }.boxed_local(),
+            None,
+        );
+        output.unwrap()
     }
 
-    /// Block until the future completes or timeout occurs.
-    /// Returns Ok(output) if completed, Err(future) if timed out.
-    pub fn block_with_timeout<Fut: Future>(
+    pub fn block_with_timeout<Fut: Unpin + Future>(
         &self,
         timeout: Duration,
-        future: Fut,
-    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
-        use std::cell::Cell;
-
-        let output = Cell::new(None);
-        let mut future = Box::pin(future);
-
-        {
-            let future_ref = &mut future;
-            let wrapper = async {
-                output.set(Some(future_ref.await));
-            };
-            let mut wrapper = std::pin::pin!(wrapper);
-
-            self.scheduler
-                .block(Some(self.session_id), wrapper.as_mut(), Some(timeout));
-        }
-
-        match output.take() {
-            Some(value) => Ok(value),
-            None => Err(future),
-        }
+        mut future: Fut,
+    ) -> Result<Fut::Output, Fut> {
+        let mut output = None;
+        self.scheduler.block(
+            Some(self.session_id),
+            async { output = Some((&mut future).await) }.boxed_local(),
+            Some(timeout),
+        );
+        output.ok_or(future)
     }
 
     pub fn timer(&self, duration: Duration) -> Timer {
         self.scheduler.timer(duration)
     }
-
-    pub fn now(&self) -> Instant {
-        self.scheduler.clock().now()
-    }
 }
 
 #[derive(Clone)]
 pub struct BackgroundExecutor {
     scheduler: Arc<dyn Scheduler>,
-    closed: Arc<AtomicBool>,
 }
 
 impl BackgroundExecutor {
     pub fn new(scheduler: Arc<dyn Scheduler>) -> Self {
-        Self {
-            scheduler,
-            closed: Arc::new(AtomicBool::new(false)),
-        }
-    }
-
-    /// Returns the closed flag for this executor.
-    pub fn closed(&self) -> &Arc<AtomicBool> {
-        &self.closed
+        Self { scheduler }
     }
 
-    /// Close this executor. Tasks will not run after this is called.
-    pub fn close(&self) {
-        self.closed.store(true, Ordering::SeqCst);
-    }
-
-    #[track_caller]
     pub fn spawn<F>(&self, future: F) -> Task<F::Output>
-    where
-        F: Future + Send + 'static,
-        F::Output: Send + 'static,
-    {
-        self.spawn_with_priority(Priority::default(), future)
-    }
-
-    #[track_caller]
-    pub fn spawn_with_priority<F>(&self, priority: Priority, future: F) -> Task<F::Output>
     where
         F: Future + Send + 'static,
         F::Output: Send + 'static,
     {
         let scheduler = Arc::clone(&self.scheduler);
-        let location = Location::caller();
-        let closed = self.closed.clone();
-        let (runnable, task) = async_task::Builder::new()
-            .metadata(RunnableMeta { location, closed })
-            .spawn(
-                move |_| future,
-                move |runnable| {
-                    scheduler.schedule_background_with_priority(runnable, priority);
-                },
-            );
-        runnable.schedule();
-        Task(TaskState::Spawned(task))
-    }
-
-    /// Spawns a future on a dedicated realtime thread for audio processing.
-    #[track_caller]
-    pub fn spawn_realtime<F>(&self, future: F) -> Task<F::Output>
-    where
-        F: Future + Send + 'static,
-        F::Output: Send + 'static,
-    {
-        let location = Location::caller();
-        let closed = self.closed.clone();
-        let (tx, rx) = flume::bounded::<async_task::Runnable<RunnableMeta>>(1);
-
-        self.scheduler.spawn_realtime(Box::new(move || {
-            while let Ok(runnable) = rx.recv() {
-                if runnable.metadata().is_closed() {
-                    continue;
-                }
-                runnable.run();
-            }
-        }));
-
-        let (runnable, task) = async_task::Builder::new()
-            .metadata(RunnableMeta { location, closed })
-            .spawn(
-                move |_| future,
-                move |runnable| {
-                    let _ = tx.send(runnable);
-                },
-            );
+        let (runnable, task) = async_task::spawn(future, move |runnable| {
+            scheduler.schedule_background(runnable);
+        });
         runnable.schedule();
         Task(TaskState::Spawned(task))
     }
@@ -215,10 +100,6 @@ impl BackgroundExecutor {
         self.scheduler.timer(duration)
     }
 
-    pub fn now(&self) -> Instant {
-        self.scheduler.clock().now()
-    }
-
     pub fn scheduler(&self) -> &Arc<dyn Scheduler> {
         &self.scheduler
     }
@@ -240,7 +121,7 @@ enum TaskState<T> {
     Ready(Option<T>),
 
     /// A task that is currently running.
-    Spawned(async_task::Task<T, RunnableMeta>),
+    Spawned(async_task::Task<T>),
 }
 
 impl<T> Task<T> {
@@ -249,11 +130,6 @@ impl<T> Task<T> {
         Task(TaskState::Ready(Some(val)))
     }
 
-    /// Creates a Task from an async_task::Task
-    pub fn from_async_task(task: async_task::Task<T, RunnableMeta>) -> Self {
-        Task(TaskState::Spawned(task))
-    }
-
     pub fn is_ready(&self) -> bool {
         match &self.0 {
             TaskState::Ready(_) => true,
@@ -268,63 +144,6 @@ impl<T> Task<T> {
             Task(TaskState::Spawned(task)) => task.detach(),
         }
     }
-
-    /// Converts this task into a fallible task that returns `Option<T>`.
-    pub fn fallible(self) -> FallibleTask<T> {
-        FallibleTask(match self.0 {
-            TaskState::Ready(val) => FallibleTaskState::Ready(val),
-            TaskState::Spawned(task) => FallibleTaskState::Spawned(task.fallible()),
-        })
-    }
-}
-
-/// A task that returns `Option<T>` instead of panicking when cancelled.
-#[must_use]
-pub struct FallibleTask<T>(FallibleTaskState<T>);
-
-enum FallibleTaskState<T> {
-    /// A task that is ready to return a value
-    Ready(Option<T>),
-
-    /// A task that is currently running (wraps async_task::FallibleTask).
-    Spawned(async_task::FallibleTask<T, RunnableMeta>),
-}
-
-impl<T> FallibleTask<T> {
-    /// Creates a new fallible task that will resolve with the value.
-    pub fn ready(val: T) -> Self {
-        FallibleTask(FallibleTaskState::Ready(Some(val)))
-    }
-
-    /// Detaching a task runs it to completion in the background.
-    pub fn detach(self) {
-        match self.0 {
-            FallibleTaskState::Ready(_) => {}
-            FallibleTaskState::Spawned(task) => task.detach(),
-        }
-    }
-}
-
-impl<T> Future for FallibleTask<T> {
-    type Output = Option<T>;
-
-    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
-        match unsafe { self.get_unchecked_mut() } {
-            FallibleTask(FallibleTaskState::Ready(val)) => Poll::Ready(val.take()),
-            FallibleTask(FallibleTaskState::Spawned(task)) => Pin::new(task).poll(cx),
-        }
-    }
-}
-
-impl<T> std::fmt::Debug for FallibleTask<T> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match &self.0 {
-            FallibleTaskState::Ready(_) => f.debug_tuple("FallibleTask::Ready").finish(),
-            FallibleTaskState::Spawned(task) => {
-                f.debug_tuple("FallibleTask::Spawned").field(task).finish()
-            }
-        }
-    }
 }
 
 impl<T> Future for Task<T> {
@@ -339,19 +158,18 @@ impl<T> Future for Task<T> {
 }
 
 /// Variant of `async_task::spawn_local` that includes the source location of the spawn in panics.
+///
+/// Copy-modified from:
+/// <https://github.com/smol-rs/async-task/blob/ca9dbe1db9c422fd765847fa91306e30a6bb58a9/src/runnable.rs#L405>
 #[track_caller]
 fn spawn_local_with_source_location<Fut, S>(
     future: Fut,
     schedule: S,
-    metadata: RunnableMeta,
-) -> (
-    async_task::Runnable<RunnableMeta>,
-    async_task::Task<Fut::Output, RunnableMeta>,
-)
+) -> (async_task::Runnable, async_task::Task<Fut::Output, ()>)
 where
     Fut: Future + 'static,
     Fut::Output: 'static,
-    S: async_task::Schedule<RunnableMeta> + Send + Sync + 'static,
+    S: async_task::Schedule + Send + Sync + 'static,
 {
     #[inline]
     fn thread_id() -> ThreadId {
@@ -394,18 +212,12 @@ where
         }
     }
 
-    let location = metadata.location;
+    // Wrap the future into one that checks which thread it's on.
+    let future = Checked {
+        id: thread_id(),
+        inner: ManuallyDrop::new(future),
+        location: Location::caller(),
+    };
 
-    unsafe {
-        async_task::Builder::new()
-            .metadata(metadata)
-            .spawn_unchecked(
-                move |_| Checked {
-                    id: thread_id(),
-                    inner: ManuallyDrop::new(future),
-                    location,
-                },
-                schedule,
-            )
-    }
+    unsafe { async_task::spawn_unchecked(future, schedule) }
 }
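A minimal sketch (not part of the patch) of exercising the restored spawn path, assuming the `scheduler` crate re-exports `TestScheduler`/`TestSchedulerConfig` and that `TestScheduler::run` and `ForegroundExecutor::block_with_timeout` are public, as the test code later in this diff suggests; the test name is made up:

```rust
use scheduler::{TestScheduler, TestSchedulerConfig};
use std::{sync::Arc, time::Duration};

#[test]
fn spawn_then_block() {
    let scheduler = Arc::new(TestScheduler::new(TestSchedulerConfig::default()));
    let background = scheduler.background();

    // `spawn` hands the runnable straight to Scheduler::schedule_background,
    // with no priority or RunnableMeta attached.
    let task = background.spawn(async { 2 + 2 });

    // Drain the queued runnable so the task is already complete, then collect
    // its output deterministically via the foreground executor.
    scheduler.run();
    let output = scheduler
        .foreground()
        .block_with_timeout(Duration::from_millis(100), task);
    assert_eq!(output.ok(), Some(4));
}
```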

crates/scheduler/src/scheduler.rs 🔗

@@ -9,119 +9,32 @@ pub use executor::*;
 pub use test_scheduler::*;
 
 use async_task::Runnable;
-use futures::channel::oneshot;
+use futures::{FutureExt as _, channel::oneshot, future::LocalBoxFuture};
 use std::{
     future::Future,
-    panic::Location,
     pin::Pin,
-    sync::{
-        Arc,
-        atomic::{AtomicBool, Ordering},
-    },
+    sync::Arc,
     task::{Context, Poll},
     time::Duration,
 };
 
-/// Task priority for background tasks.
-///
-/// Higher priority tasks are more likely to be scheduled before lower priority tasks,
-/// but this is not a strict guarantee - the scheduler may interleave tasks of different
-/// priorities to prevent starvation.
-#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
-#[repr(u8)]
-pub enum Priority {
-    /// Realtime priority
-    ///
-    /// Spawning a task with this priority will spin it off on a separate thread dedicated just to that task. Only use for audio.
-    RealtimeAudio,
-    /// High priority - use for tasks critical to user experience/responsiveness.
-    High,
-    /// Medium priority - suitable for most use cases.
-    #[default]
-    Medium,
-    /// Low priority - use for background work that can be deprioritized.
-    Low,
-}
-
-impl Priority {
-    /// Returns the relative probability weight for this priority level.
-    /// Used by schedulers to determine task selection probability.
-    pub const fn weight(self) -> u32 {
-        match self {
-            Priority::High => 60,
-            Priority::Medium => 30,
-            Priority::Low => 10,
-            // realtime priorities are not considered for probability scheduling
-            Priority::RealtimeAudio => 0,
-        }
-    }
-}
-
-/// Metadata attached to runnables for debugging and profiling.
-#[derive(Clone)]
-pub struct RunnableMeta {
-    /// The source location where the task was spawned.
-    pub location: &'static Location<'static>,
-    /// Shared flag indicating whether the scheduler has been closed.
-    /// When true, tasks should be dropped without running.
-    pub closed: Arc<AtomicBool>,
-}
-
-impl RunnableMeta {
-    /// Returns true if the scheduler has been closed and this task should not run.
-    pub fn is_closed(&self) -> bool {
-        self.closed.load(Ordering::SeqCst)
-    }
-}
-
-impl std::fmt::Debug for RunnableMeta {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("RunnableMeta")
-            .field("location", &self.location)
-            .field("closed", &self.is_closed())
-            .finish()
-    }
-}
-
 pub trait Scheduler: Send + Sync {
-    /// Block until the given future completes or timeout occurs.
-    ///
-    /// Returns `true` if the future completed, `false` if it timed out.
-    /// The future is passed as a pinned mutable reference so the caller
-    /// retains ownership and can continue polling or return it on timeout.
     fn block(
         &self,
         session_id: Option<SessionId>,
-        future: Pin<&mut dyn Future<Output = ()>>,
+        future: LocalBoxFuture<()>,
         timeout: Option<Duration>,
-    ) -> bool;
-
-    fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable<RunnableMeta>);
-
-    /// Schedule a background task with the given priority.
-    fn schedule_background_with_priority(
-        &self,
-        runnable: Runnable<RunnableMeta>,
-        priority: Priority,
     );
-
-    /// Spawn a closure on a dedicated realtime thread for audio processing.
-    fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>);
-
-    /// Schedule a background task with default (medium) priority.
-    fn schedule_background(&self, runnable: Runnable<RunnableMeta>) {
-        self.schedule_background_with_priority(runnable, Priority::default());
-    }
-
+    fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable);
+    fn schedule_background(&self, runnable: Runnable);
     fn timer(&self, timeout: Duration) -> Timer;
     fn clock(&self) -> Arc<dyn Clock>;
-
-    fn as_test(&self) -> Option<&TestScheduler> {
-        None
+    fn as_test(&self) -> &TestScheduler {
+        panic!("this is not a test scheduler")
     }
 }
 
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
 pub struct SessionId(u16);
 
 impl SessionId {
@@ -142,7 +55,7 @@ impl Future for Timer {
     type Output = ();
 
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> {
-        match Pin::new(&mut self.0).poll(cx) {
+        match self.0.poll_unpin(cx) {
             Poll::Ready(_) => Poll::Ready(()),
             Poll::Pending => Poll::Pending,
         }
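With this revert, `Scheduler::block` takes an owned `LocalBoxFuture<()>` and returns nothing, so callers box the future up front and cannot reclaim it on timeout. A hedged sketch of a call site (the free function and the empty work future are illustrative, and the `scheduler` crate path is assumed):

```rust
use futures::FutureExt as _;
use scheduler::Scheduler;
use std::time::Duration;

fn block_briefly(scheduler: &dyn Scheduler) {
    // Box the unit future up front; ownership moves into the scheduler, so a
    // timed-out future cannot be recovered and polled again later.
    let future = async {
        // ... the actual work would go here ...
    }
    .boxed_local();

    // No session id, 50 ms budget. With the reverted signature there is no
    // return value reporting whether the future finished before the timeout.
    scheduler.block(None, future, Some(Duration::from_millis(50)));
}
```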

crates/scheduler/src/test_scheduler.rs 🔗

@@ -1,18 +1,14 @@
 use crate::{
-    BackgroundExecutor, Clock, ForegroundExecutor, Priority, RunnableMeta, Scheduler, SessionId,
-    TestClock, Timer,
+    BackgroundExecutor, Clock, ForegroundExecutor, Scheduler, SessionId, TestClock, Timer,
 };
 use async_task::Runnable;
 use backtrace::{Backtrace, BacktraceFrame};
-use futures::channel::oneshot;
-use parking_lot::{Mutex, MutexGuard};
-use rand::{
-    distr::{StandardUniform, uniform::SampleRange, uniform::SampleUniform},
-    prelude::*,
-};
+use futures::{FutureExt as _, channel::oneshot, future::LocalBoxFuture};
+use parking_lot::Mutex;
+use rand::prelude::*;
 use std::{
     any::type_name_of_val,
-    collections::{BTreeMap, HashSet, VecDeque},
+    collections::{BTreeMap, VecDeque},
     env,
     fmt::Write,
     future::Future,
@@ -94,7 +90,6 @@ impl TestScheduler {
                 capture_pending_traces: config.capture_pending_traces,
                 pending_traces: BTreeMap::new(),
                 next_trace_id: TraceId(0),
-                is_main_thread: true,
             })),
             clock: Arc::new(TestClock::new()),
             thread: thread::current(),
@@ -105,8 +100,8 @@ impl TestScheduler {
         self.clock.clone()
     }
 
-    pub fn rng(&self) -> SharedRng {
-        SharedRng(self.rng.clone())
+    pub fn rng(&self) -> Arc<Mutex<StdRng>> {
+        self.rng.clone()
     }
 
     pub fn set_timeout_ticks(&self, timeout_ticks: RangeInclusive<usize>) {
@@ -121,25 +116,13 @@ impl TestScheduler {
         self.state.lock().allow_parking = false;
     }
 
-    pub fn parking_allowed(&self) -> bool {
-        self.state.lock().allow_parking
-    }
-
-    pub fn is_main_thread(&self) -> bool {
-        self.state.lock().is_main_thread
-    }
-
-    /// Allocate a new session ID for foreground task scheduling.
-    /// This is used by GPUI's TestDispatcher to map dispatcher instances to sessions.
-    pub fn allocate_session_id(&self) -> SessionId {
-        let mut state = self.state.lock();
-        state.next_session_id.0 += 1;
-        state.next_session_id
-    }
-
     /// Create a foreground executor for this scheduler
     pub fn foreground(self: &Arc<Self>) -> ForegroundExecutor {
-        let session_id = self.allocate_session_id();
+        let session_id = {
+            let mut state = self.state.lock();
+            state.next_session_id.0 += 1;
+            state.next_session_id
+        };
         ForegroundExecutor::new(session_id, self.clone())
     }
 
@@ -169,159 +152,38 @@ impl TestScheduler {
         }
     }
 
-    /// Execute one tick of the scheduler, processing expired timers and running
-    /// at most one task. Returns true if any work was done.
-    ///
-    /// This is the public interface for GPUI's TestDispatcher to drive task execution.
-    pub fn tick(&self) -> bool {
-        self.step_filtered(false)
-    }
-
-    /// Execute one tick, but only run background tasks (no foreground/session tasks).
-    /// Returns true if any work was done.
-    pub fn tick_background_only(&self) -> bool {
-        self.step_filtered(true)
-    }
-
-    /// Check if there are any pending tasks or timers that could run.
-    pub fn has_pending_tasks(&self) -> bool {
-        let state = self.state.lock();
-        !state.runnables.is_empty() || !state.timers.is_empty()
-    }
-
-    /// Returns counts of (foreground_tasks, background_tasks) currently queued.
-    /// Foreground tasks are those with a session_id, background tasks have none.
-    pub fn pending_task_counts(&self) -> (usize, usize) {
-        let state = self.state.lock();
-        let foreground = state
-            .runnables
-            .iter()
-            .filter(|r| r.session_id.is_some())
-            .count();
-        let background = state
-            .runnables
-            .iter()
-            .filter(|r| r.session_id.is_none())
-            .count();
-        (foreground, background)
-    }
-
     fn step(&self) -> bool {
-        self.step_filtered(false)
-    }
-
-    fn step_filtered(&self, background_only: bool) -> bool {
-        let (elapsed_count, runnables_before) = {
+        let elapsed_timers = {
             let mut state = self.state.lock();
             let end_ix = state
                 .timers
                 .partition_point(|timer| timer.expiration <= self.clock.now());
-            let elapsed: Vec<_> = state.timers.drain(..end_ix).collect();
-            let count = elapsed.len();
-            let runnables = state.runnables.len();
-            drop(state);
-            // Dropping elapsed timers here wakes the waiting futures
-            drop(elapsed);
-            (count, runnables)
+            state.timers.drain(..end_ix).collect::<Vec<_>>()
         };
 
-        if elapsed_count > 0 {
-            let runnables_after = self.state.lock().runnables.len();
-            if std::env::var("DEBUG_SCHEDULER").is_ok() {
-                eprintln!(
-                    "[scheduler] Expired {} timers at {:?}, runnables: {} -> {}",
-                    elapsed_count,
-                    self.clock.now(),
-                    runnables_before,
-                    runnables_after
-                );
-            }
+        if !elapsed_timers.is_empty() {
             return true;
         }
 
         let runnable = {
             let state = &mut *self.state.lock();
-
-            // Find candidate tasks:
-            // - For foreground tasks (with session_id), only the first task from each session
-            //   is a candidate (to preserve intra-session ordering)
-            // - For background tasks (no session_id), all are candidates
-            // - Tasks from blocked sessions are excluded
-            // - If background_only is true, skip foreground tasks entirely
-            let mut seen_sessions = HashSet::new();
-            let candidate_indices: Vec<usize> = state
-                .runnables
-                .iter()
-                .enumerate()
-                .filter(|(_, runnable)| {
-                    if let Some(session_id) = runnable.session_id {
-                        // Skip foreground tasks if background_only mode
-                        if background_only {
-                            return false;
-                        }
-                        // Exclude tasks from blocked sessions
-                        if state.blocked_sessions.contains(&session_id) {
-                            return false;
-                        }
-                        // Only include first task from each session (insert returns true if new)
-                        seen_sessions.insert(session_id)
-                    } else {
-                        // Background tasks are always candidates
-                        true
-                    }
-                })
-                .map(|(ix, _)| ix)
-                .collect();
-
-            if candidate_indices.is_empty() {
-                None
-            } else if state.randomize_order {
-                // Use priority-weighted random selection
-                let weights: Vec<u32> = candidate_indices
-                    .iter()
-                    .map(|&ix| state.runnables[ix].priority.weight())
-                    .collect();
-                let total_weight: u32 = weights.iter().sum();
-
-                if total_weight == 0 {
-                    // Fallback to uniform random if all weights are zero
-                    let choice = self.rng.lock().random_range(0..candidate_indices.len());
-                    state.runnables.remove(candidate_indices[choice])
-                } else {
-                    let mut target = self.rng.lock().random_range(0..total_weight);
-                    let mut selected_idx = 0;
-                    for (i, &weight) in weights.iter().enumerate() {
-                        if target < weight {
-                            selected_idx = i;
-                            break;
-                        }
-                        target -= weight;
-                    }
-                    state.runnables.remove(candidate_indices[selected_idx])
-                }
-            } else {
-                // Non-randomized: just take the first candidate task
-                state.runnables.remove(candidate_indices[0])
-            }
+            let ix = state.runnables.iter().position(|runnable| {
+                runnable
+                    .session_id
+                    .is_none_or(|session_id| !state.blocked_sessions.contains(&session_id))
+            });
+            ix.and_then(|ix| state.runnables.remove(ix))
         };
 
         if let Some(runnable) = runnable {
-            // Check if the executor that spawned this task was closed
-            if runnable.runnable.metadata().is_closed() {
-                return true;
-            }
-            let is_foreground = runnable.session_id.is_some();
-            let was_main_thread = self.state.lock().is_main_thread;
-            self.state.lock().is_main_thread = is_foreground;
             runnable.run();
-            self.state.lock().is_main_thread = was_main_thread;
             return true;
         }
 
         false
     }
 
-    pub fn advance_clock_to_next_timer(&self) -> bool {
+    fn advance_clock_to_next_timer(&self) -> bool {
         if let Some(timer) = self.state.lock().timers.first() {
             self.clock.advance(timer.expiration - self.clock.now());
             true
@@ -331,41 +193,18 @@ impl TestScheduler {
     }
 
     pub fn advance_clock(&self, duration: Duration) {
-        let debug = std::env::var("DEBUG_SCHEDULER").is_ok();
-        let start = self.clock.now();
-        let next_now = start + duration;
-        if debug {
-            let timer_count = self.state.lock().timers.len();
-            eprintln!(
-                "[scheduler] advance_clock({:?}) from {:?}, {} pending timers",
-                duration, start, timer_count
-            );
-        }
+        let next_now = self.clock.now() + duration;
         loop {
             self.run();
             if let Some(timer) = self.state.lock().timers.first()
                 && timer.expiration <= next_now
             {
-                let advance_to = timer.expiration;
-                if debug {
-                    eprintln!(
-                        "[scheduler] Advancing clock {:?} -> {:?} for timer",
-                        self.clock.now(),
-                        advance_to
-                    );
-                }
-                self.clock.advance(advance_to - self.clock.now());
+                self.clock.advance(timer.expiration - self.clock.now());
             } else {
                 break;
             }
         }
         self.clock.advance(next_now - self.clock.now());
-        if debug {
-            eprintln!(
-                "[scheduler] advance_clock done, now at {:?}",
-                self.clock.now()
-            );
-        }
     }
 
     fn park(&self, deadline: Option<Instant>) -> bool {
@@ -406,9 +245,9 @@ impl Scheduler for TestScheduler {
     fn block(
         &self,
         session_id: Option<SessionId>,
-        mut future: Pin<&mut dyn Future<Output = ()>>,
+        mut future: LocalBoxFuture<()>,
         timeout: Option<Duration>,
-    ) -> bool {
+    ) {
         if let Some(session_id) = session_id {
             self.state.lock().blocked_sessions.push(session_id);
         }
@@ -431,15 +270,10 @@ impl Scheduler for TestScheduler {
         };
         let mut cx = Context::from_waker(&waker);
 
-        let mut completed = false;
         for _ in 0..max_ticks {
-            match future.as_mut().poll(&mut cx) {
-                Poll::Ready(()) => {
-                    completed = true;
-                    break;
-                }
-                Poll::Pending => {}
-            }
+            let Poll::Pending = future.poll_unpin(&mut cx) else {
+                break;
+            };
 
             let mut stepped = None;
             while self.rng.lock().random() {
@@ -463,11 +297,9 @@ impl Scheduler for TestScheduler {
         if session_id.is_some() {
             self.state.lock().blocked_sessions.pop();
         }
-
-        completed
     }
 
-    fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable<RunnableMeta>) {
+    fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable) {
         let mut state = self.state.lock();
         let ix = if state.randomize_order {
             let start_ix = state
@@ -485,7 +317,6 @@ impl Scheduler for TestScheduler {
             ix,
             ScheduledRunnable {
                 session_id: Some(session_id),
-                priority: Priority::default(),
                 runnable,
             },
         );
@@ -493,11 +324,7 @@ impl Scheduler for TestScheduler {
         self.thread.unpark();
     }
 
-    fn schedule_background_with_priority(
-        &self,
-        runnable: Runnable<RunnableMeta>,
-        priority: Priority,
-    ) {
+    fn schedule_background(&self, runnable: Runnable) {
         let mut state = self.state.lock();
         let ix = if state.randomize_order {
             self.rng.lock().random_range(0..=state.runnables.len())
@@ -508,7 +335,6 @@ impl Scheduler for TestScheduler {
             ix,
             ScheduledRunnable {
                 session_id: None,
-                priority,
                 runnable,
             },
         );
@@ -516,12 +342,6 @@ impl Scheduler for TestScheduler {
         self.thread.unpark();
     }
 
-    fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
-        std::thread::spawn(move || {
-            f();
-        });
-    }
-
     fn timer(&self, duration: Duration) -> Timer {
         let (tx, rx) = oneshot::channel();
         let state = &mut *self.state.lock();
@@ -537,8 +357,8 @@ impl Scheduler for TestScheduler {
         self.clock.clone()
     }
 
-    fn as_test(&self) -> Option<&TestScheduler> {
-        Some(self)
+    fn as_test(&self) -> &TestScheduler {
+        self
     }
 }
 
@@ -575,8 +395,7 @@ impl Default for TestSchedulerConfig {
 
 struct ScheduledRunnable {
     session_id: Option<SessionId>,
-    priority: Priority,
-    runnable: Runnable<RunnableMeta>,
+    runnable: Runnable,
 }
 
 impl ScheduledRunnable {
@@ -601,7 +420,6 @@ struct SchedulerState {
     capture_pending_traces: bool,
     next_trace_id: TraceId,
     pending_traces: BTreeMap<TraceId, Backtrace>,
-    is_main_thread: bool,
 }
 
 const WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
@@ -690,46 +508,6 @@ impl TracingWaker {
 
 pub struct Yield(usize);
 
-/// A wrapper around `Arc<Mutex<StdRng>>` that provides convenient methods
-/// for random number generation without requiring explicit locking.
-#[derive(Clone)]
-pub struct SharedRng(Arc<Mutex<StdRng>>);
-
-impl SharedRng {
-    /// Lock the inner RNG for direct access. Use this when you need multiple
-    /// random operations without re-locking between each one.
-    pub fn lock(&self) -> MutexGuard<'_, StdRng> {
-        self.0.lock()
-    }
-
-    /// Generate a random value in the given range.
-    pub fn random_range<T, R>(&self, range: R) -> T
-    where
-        T: SampleUniform,
-        R: SampleRange<T>,
-    {
-        self.0.lock().random_range(range)
-    }
-
-    /// Generate a random boolean with the given probability of being true.
-    pub fn random_bool(&self, p: f64) -> bool {
-        self.0.lock().random_bool(p)
-    }
-
-    /// Generate a random value of the given type.
-    pub fn random<T>(&self) -> T
-    where
-        StandardUniform: Distribution<T>,
-    {
-        self.0.lock().random()
-    }
-
-    /// Generate a random ratio - true with probability `numerator/denominator`.
-    pub fn random_ratio(&self, numerator: u32, denominator: u32) -> bool {
-        self.0.lock().random_ratio(numerator, denominator)
-    }
-}
-
 impl Future for Yield {
     type Output = ();
 

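With `SharedRng` removed, `TestScheduler::rng` again returns the raw `Arc<Mutex<StdRng>>`, so callers lock it per use. A small sketch using the rand method names already present in this file; the helper function is hypothetical:

```rust
use rand::Rng as _;
use scheduler::TestScheduler;

// Hypothetical helper: pick a random index the same way the scheduler does above.
fn pick_index(scheduler: &TestScheduler, len: usize) -> usize {
    assert!(len > 0, "len must be non-zero");
    // Lock the shared StdRng directly; the SharedRng convenience wrappers
    // (random_range, random_bool, random_ratio, ...) are removed by this revert.
    scheduler.rng().lock().random_range(0..len)
}
```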
crates/scheduler/src/tests.rs 🔗

@@ -238,7 +238,7 @@ fn test_block() {
 }
 
 #[test]
-#[should_panic(expected = "Parking forbidden. Pending traces:")]
+#[should_panic(expected = "futures_channel::oneshot::Inner")]
 fn test_parking_panics() {
     let config = TestSchedulerConfig {
         capture_pending_traces: true,
@@ -297,27 +297,20 @@ fn test_block_with_timeout() {
         let foreground = scheduler.foreground();
         let future = future::ready(42);
         let output = foreground.block_with_timeout(Duration::from_millis(100), future);
-        assert_eq!(output.ok(), Some(42));
+        assert_eq!(output.unwrap(), 42);
     });
 
     // Test case: future times out
     TestScheduler::once(async |scheduler| {
-        // Make timeout behavior deterministic by forcing the timeout tick budget to be exactly 0.
-        // This prevents `block_with_timeout` from making progress via extra scheduler stepping and
-        // accidentally completing work that we expect to time out.
-        scheduler.set_timeout_ticks(0..=0);
-
         let foreground = scheduler.foreground();
         let future = future::pending::<()>();
         let output = foreground.block_with_timeout(Duration::from_millis(50), future);
-        assert!(output.is_err(), "future should not have finished");
+        let _ = output.expect_err("future should not have finished");
     });
 
     // Test case: future makes progress via timer but still times out
     let mut results = BTreeSet::new();
     TestScheduler::many(100, async |scheduler| {
-        // Keep the existing probabilistic behavior here (do not force 0 ticks), since this subtest
-        // is explicitly checking that some seeds/timeouts can complete while others can time out.
         let task = scheduler.background().spawn(async move {
             Yield { polls: 10 }.await;
             42
@@ -331,44 +324,6 @@ fn test_block_with_timeout() {
         results.into_iter().collect::<Vec<_>>(),
         vec![None, Some(42)]
     );
-
-    // Regression test:
-    // A timed-out future must not be cancelled. The returned future should still be
-    // pollable to completion later. We also want to ensure time only advances when we
-    // explicitly advance it (not by yielding).
-    TestScheduler::once(async |scheduler| {
-        // Force immediate timeout: the timeout tick budget is 0 so we will not step or
-        // advance timers inside `block_with_timeout`.
-        scheduler.set_timeout_ticks(0..=0);
-
-        let background = scheduler.background();
-
-        // This task should only complete once time is explicitly advanced.
-        let task = background.spawn({
-            let scheduler = scheduler.clone();
-            async move {
-                scheduler.timer(Duration::from_millis(100)).await;
-                123
-            }
-        });
-
-        // This should time out before we advance time enough for the timer to fire.
-        let timed_out = scheduler
-            .foreground()
-            .block_with_timeout(Duration::from_millis(50), task);
-        assert!(
-            timed_out.is_err(),
-            "expected timeout before advancing the clock enough for the timer"
-        );
-
-        // Now explicitly advance time and ensure the returned future can complete.
-        let mut task = timed_out.err().unwrap();
-        scheduler.advance_clock(Duration::from_millis(100));
-        scheduler.run();
-
-        let output = scheduler.foreground().block_on(&mut task);
-        assert_eq!(output, 123);
-    });
 }
 
 // When calling block, we shouldn't make progress on foreground-spawned futures with the same session id.
@@ -415,64 +370,3 @@ impl Future for Yield {
         }
     }
 }
-
-#[test]
-fn test_background_priority_scheduling() {
-    use parking_lot::Mutex;
-
-    // Run many iterations to get statistical significance
-    let mut high_before_low_count = 0;
-    let iterations = 100;
-
-    for seed in 0..iterations {
-        let config = TestSchedulerConfig::with_seed(seed);
-        let scheduler = Arc::new(TestScheduler::new(config));
-        let background = scheduler.background();
-
-        let execution_order = Arc::new(Mutex::new(Vec::new()));
-
-        // Spawn low priority tasks first
-        for i in 0..3 {
-            let order = execution_order.clone();
-            background
-                .spawn_with_priority(Priority::Low, async move {
-                    order.lock().push(format!("low-{}", i));
-                })
-                .detach();
-        }
-
-        // Spawn high priority tasks second
-        for i in 0..3 {
-            let order = execution_order.clone();
-            background
-                .spawn_with_priority(Priority::High, async move {
-                    order.lock().push(format!("high-{}", i));
-                })
-                .detach();
-        }
-
-        scheduler.run();
-
-        // Count how many high priority tasks ran in the first half
-        let order = execution_order.lock();
-        let high_in_first_half = order
-            .iter()
-            .take(3)
-            .filter(|s| s.starts_with("high"))
-            .count();
-
-        if high_in_first_half >= 2 {
-            high_before_low_count += 1;
-        }
-    }
-
-    // High priority tasks should tend to run before low priority tasks
-    // With weights of 60 vs 10, high priority should dominate early execution
-    assert!(
-        high_before_low_count > iterations / 2,
-        "Expected high priority tasks to run before low priority tasks more often. \
-         Got {} out of {} iterations",
-        high_before_low_count,
-        iterations
-    );
-}

crates/storybook/src/stories/picker.rs 🔗

@@ -93,8 +93,8 @@ impl PickerDelegate for Delegate {
     ) -> Task<()> {
         let candidates = self.candidates.clone();
         self.matches = cx
-            .foreground_executor()
-            .block_on(fuzzy::match_strings(
+            .background_executor()
+            .block(fuzzy::match_strings(
                 &candidates,
                 &query,
                 true,

crates/terminal/src/terminal.rs 🔗

@@ -2750,7 +2750,7 @@ mod tests {
             })
         })
         .detach();
-        let completion_check_task = cx.background_spawn(async move {
+        cx.background_spawn(async move {
             // The channel may be closed if the terminal is dropped before sending
             // the completion signal, which can happen with certain task scheduling orders.
             let exit_status = completion_rx.recv().await.ok().flatten();
@@ -2764,7 +2764,8 @@ mod tests {
                 #[cfg(not(target_os = "windows"))]
                 assert_eq!(exit_status.code(), None);
             }
-        });
+        })
+        .detach();
 
         let mut all_events = Vec::new();
         while let Ok(Ok(new_event)) =
@@ -2779,8 +2780,6 @@ mod tests {
                 .any(|event| event == &Event::CloseTerminal),
             "Wrong shell command should update the title but not should not close the terminal to show the error message, but got events: {all_events:?}",
         );
-
-        completion_check_task.await;
     }
 
     #[test]

crates/workspace/src/workspace.rs 🔗

@@ -11541,9 +11541,6 @@ mod tests {
                 window,
                 cx,
             );
-        });
-        cx.run_until_parked();
-        workspace.update_in(cx, |workspace, window, cx| {
             workspace.move_item_to_pane_at_index(
                 &MoveItemToPane {
                     destination: 3,

crates/worktree/src/worktree.rs 🔗

@@ -252,6 +252,7 @@ pub struct LocalSnapshot {
     /// The file handle of the worktree root
     /// (so we can find it after it's been moved)
     root_file_handle: Option<Arc<dyn fs::FileHandle>>,
+    executor: BackgroundExecutor,
 }
 
 struct BackgroundScannerState {
@@ -417,6 +418,7 @@ impl Worktree {
                     PathStyle::local(),
                 ),
                 root_file_handle,
+                executor: cx.background_executor().clone(),
             };
 
             let worktree_id = snapshot.id();
@@ -459,8 +461,7 @@ impl Worktree {
                         entry.is_hidden = settings.is_path_hidden(path);
                     }
                 }
-                cx.foreground_executor()
-                    .block_on(snapshot.insert_entry(entry, fs.as_ref()));
+                snapshot.insert_entry(entry, fs.as_ref());
             }
 
             let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
@@ -2570,11 +2571,11 @@ impl LocalSnapshot {
         }
     }
 
-    async fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
+    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
         log::trace!("insert entry {:?}", entry.path);
         if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
             let abs_path = self.absolutize(&entry.path);
-            match build_gitignore(&abs_path, fs).await {
+            match self.executor.block(build_gitignore(&abs_path, fs)) {
                 Ok(ignore) => {
                     self.ignores_by_parent_abs_path
                         .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
@@ -2868,7 +2869,7 @@ impl BackgroundScannerState {
     }
 
     async fn insert_entry(&mut self, entry: Entry, fs: &dyn Fs, watcher: &dyn Watcher) -> Entry {
-        let entry = self.snapshot.insert_entry(entry, fs).await;
+        let entry = self.snapshot.insert_entry(entry, fs);
         if entry.path.file_name() == Some(&DOT_GIT) {
             self.insert_git_repository(entry.path.clone(), fs, watcher)
                 .await;
@@ -4826,40 +4827,39 @@ impl BackgroundScanner {
 
     async fn ignores_needing_update(&self) -> Vec<Arc<Path>> {
         let mut ignores_to_update = Vec::new();
-        let mut excludes_to_load: Vec<(Arc<Path>, PathBuf)> = Vec::new();
 
-        // First pass: collect updates and drop stale entries without awaiting.
         {
             let snapshot = &mut self.state.lock().await.snapshot;
             let abs_path = snapshot.abs_path.clone();
-            let mut repo_exclude_keys_to_remove: Vec<Arc<Path>> = Vec::new();
-
-            for (work_dir_abs_path, (_, needs_update)) in
-                snapshot.repo_exclude_by_work_dir_abs_path.iter_mut()
-            {
-                let repository = snapshot
-                    .git_repositories
-                    .iter()
-                    .find(|(_, repo)| &repo.work_directory_abs_path == work_dir_abs_path);
 
-                if *needs_update {
-                    *needs_update = false;
-                    ignores_to_update.push(work_dir_abs_path.clone());
+            snapshot.repo_exclude_by_work_dir_abs_path.retain(
+                |work_dir_abs_path, (exclude, needs_update)| {
+                    if *needs_update {
+                        *needs_update = false;
+                        ignores_to_update.push(work_dir_abs_path.clone());
 
-                    if let Some((_, repository)) = repository {
-                        let exclude_abs_path = repository.common_dir_abs_path.join(REPO_EXCLUDE);
-                        excludes_to_load.push((work_dir_abs_path.clone(), exclude_abs_path));
+                        if let Some((_, repository)) = snapshot
+                            .git_repositories
+                            .iter()
+                            .find(|(_, repo)| &repo.work_directory_abs_path == work_dir_abs_path)
+                        {
+                            let exclude_abs_path =
+                                repository.common_dir_abs_path.join(REPO_EXCLUDE);
+                            if let Ok(current_exclude) = self
+                                .executor
+                                .block(build_gitignore(&exclude_abs_path, self.fs.as_ref()))
+                            {
+                                *exclude = Arc::new(current_exclude);
+                            }
+                        }
                     }
-                }
-
-                if repository.is_none() {
-                    repo_exclude_keys_to_remove.push(work_dir_abs_path.clone());
-                }
-            }
 
-            for key in repo_exclude_keys_to_remove {
-                snapshot.repo_exclude_by_work_dir_abs_path.remove(&key);
-            }
+                    snapshot
+                        .git_repositories
+                        .iter()
+                        .any(|(_, repo)| &repo.work_directory_abs_path == work_dir_abs_path)
+                },
+            );
 
             snapshot
                 .ignores_by_parent_abs_path
@@ -4884,29 +4884,6 @@ impl BackgroundScanner {
                 });
         }
 
-        // Load gitignores asynchronously (outside the lock)
-        let mut loaded_excludes: Vec<(Arc<Path>, Arc<Gitignore>)> = Vec::new();
-        for (work_dir_abs_path, exclude_abs_path) in excludes_to_load {
-            if let Ok(current_exclude) = build_gitignore(&exclude_abs_path, self.fs.as_ref()).await
-            {
-                loaded_excludes.push((work_dir_abs_path, Arc::new(current_exclude)));
-            }
-        }
-
-        // Second pass: apply updates.
-        if !loaded_excludes.is_empty() {
-            let snapshot = &mut self.state.lock().await.snapshot;
-
-            for (work_dir_abs_path, exclude) in loaded_excludes {
-                if let Some((existing_exclude, _)) = snapshot
-                    .repo_exclude_by_work_dir_abs_path
-                    .get_mut(&work_dir_abs_path)
-                {
-                    *existing_exclude = exclude;
-                }
-            }
-        }
-
         ignores_to_update
     }
 

crates/zed/src/main.rs 🔗

@@ -529,9 +529,9 @@ fn main() {
         debugger_tools::init(cx);
         client::init(&client, cx);
 
-        let system_id = cx.foreground_executor().block_on(system_id).ok();
-        let installation_id = cx.foreground_executor().block_on(installation_id).ok();
-        let session = cx.foreground_executor().block_on(session);
+        let system_id = cx.background_executor().block(system_id).ok();
+        let installation_id = cx.background_executor().block(installation_id).ok();
+        let session = cx.background_executor().block(session);
 
         let telemetry = client.telemetry();
         telemetry.start(
@@ -1553,7 +1553,7 @@ fn load_embedded_fonts(cx: &App) {
     let embedded_fonts = Mutex::new(Vec::new());
     let executor = cx.background_executor();
 
-    cx.foreground_executor().block_on(executor.scoped(|scope| {
+    executor.block(executor.scoped(|scope| {
         for font_path in &font_paths {
             if !font_path.ends_with(".ttf") {
                 continue;

crates/zed/src/reliability.rs 🔗

@@ -120,7 +120,7 @@ fn save_hang_trace(
     background_executor: &gpui::BackgroundExecutor,
     hang_time: chrono::DateTime<chrono::Local>,
 ) {
-    let thread_timings = background_executor.dispatcher().get_all_timings();
+    let thread_timings = background_executor.dispatcher.get_all_timings();
     let thread_timings = thread_timings
         .into_iter()
         .map(|mut timings| {

crates/zed/src/visual_test_runner.rs 🔗

@@ -260,7 +260,7 @@ fn run_visual_tests(project_path: PathBuf, update_baseline: bool) -> Result<()>
     // worktree creation spawns foreground tasks via cx.spawn
     // Allow parking since filesystem operations happen outside the test dispatcher
     cx.background_executor.allow_parking();
-    let worktree_result = cx.foreground_executor.block_test(add_worktree_task);
+    let worktree_result = cx.background_executor.block_test(add_worktree_task);
     cx.background_executor.forbid_parking();
     worktree_result.context("Failed to add worktree")?;
 
@@ -275,7 +275,7 @@ fn run_visual_tests(project_path: PathBuf, update_baseline: bool) -> Result<()>
 
     cx.background_executor.allow_parking();
     let panel = cx
-        .foreground_executor
+        .background_executor
         .block_test(ProjectPanel::load(weak_workspace, async_window_cx))
         .context("Failed to load project panel")?;
     cx.background_executor.forbid_parking();
@@ -316,7 +316,7 @@ fn run_visual_tests(project_path: PathBuf, update_baseline: bool) -> Result<()>
 
     if let Some(task) = open_file_task {
         cx.background_executor.allow_parking();
-        let block_result = cx.foreground_executor.block_test(task);
+        let block_result = cx.background_executor.block_test(task);
         cx.background_executor.forbid_parking();
         if let Ok(item) = block_result {
             workspace_window
@@ -912,7 +912,7 @@ fn run_breakpoint_hover_visual_tests(
         .context("Failed to start adding worktree")?;
 
     cx.background_executor.allow_parking();
-    let worktree_result = cx.foreground_executor.block_test(add_worktree_task);
+    let worktree_result = cx.background_executor.block_test(add_worktree_task);
     cx.background_executor.forbid_parking();
     worktree_result.context("Failed to add worktree")?;
 
@@ -937,7 +937,7 @@ fn run_breakpoint_hover_visual_tests(
 
     if let Some(task) = open_file_task {
         cx.background_executor.allow_parking();
-        let _ = cx.foreground_executor.block_test(task);
+        let _ = cx.background_executor.block_test(task);
         cx.background_executor.forbid_parking();
     }
 
@@ -1198,7 +1198,7 @@ import { AiPaneTabContext } from 'context';
     });
 
     cx.background_executor.allow_parking();
-    let _ = cx.foreground_executor.block_test(add_worktree_task);
+    let _ = cx.background_executor.block_test(add_worktree_task);
     cx.background_executor.forbid_parking();
 
     cx.run_until_parked();
@@ -1333,7 +1333,7 @@ import { AiPaneTabContext } from 'context';
 
     if let Some(task) = open_file_task {
         cx.background_executor.allow_parking();
-        let _ = cx.foreground_executor.block_test(task);
+        let _ = cx.background_executor.block_test(task);
         cx.background_executor.forbid_parking();
     }
 
@@ -1478,7 +1478,7 @@ fn run_agent_thread_view_test(
 
     cx.background_executor.allow_parking();
     let (worktree, _) = cx
-        .foreground_executor
+        .background_executor
         .block_test(add_worktree_task)
         .context("Failed to add worktree")?;
     cx.background_executor.forbid_parking();
@@ -1528,7 +1528,7 @@ fn run_agent_thread_view_test(
     let run_task = cx.update(|cx| tool.clone().run(input, event_stream, cx));
 
     cx.background_executor.allow_parking();
-    let run_result = cx.foreground_executor.block_test(run_task);
+    let run_result = cx.background_executor.block_test(run_task);
     cx.background_executor.forbid_parking();
     run_result.context("ReadFileTool failed")?;
 
@@ -1609,7 +1609,7 @@ fn run_agent_thread_view_test(
         cx.update(|cx| prompt_store::PromptBuilder::load(app_state.fs.clone(), false, cx));
     cx.background_executor.allow_parking();
     let panel = cx
-        .foreground_executor
+        .background_executor
         .block_test(AgentPanel::load(
             weak_workspace,
             prompt_builder,
@@ -1653,7 +1653,7 @@ fn run_agent_thread_view_test(
     });
 
     cx.background_executor.allow_parking();
-    let send_result = cx.foreground_executor.block_test(send_future);
+    let send_result = cx.background_executor.block_test(send_future);
     cx.background_executor.forbid_parking();
     send_result.context("Failed to send message")?;
 

crates/zed/src/zed.rs 🔗

@@ -1529,12 +1529,12 @@ pub fn handle_settings_file_changes(
 
     // Initial load of both settings files
     let global_content = cx
-        .foreground_executor()
-        .block_on(global_settings_file_rx.next())
+        .background_executor()
+        .block(global_settings_file_rx.next())
         .unwrap();
     let user_content = cx
-        .foreground_executor()
-        .block_on(user_settings_file_rx.next())
+        .background_executor()
+        .block(user_settings_file_rx.next())
         .unwrap();
 
     SettingsStore::update_global(cx, |store, cx| {
@@ -2195,7 +2195,7 @@ pub(crate) fn eager_load_active_theme_and_icon_theme(fs: Arc<dyn Fs>, cx: &mut A
         return;
     }
 
-    cx.foreground_executor().block_on(executor.scoped(|scope| {
+    executor.block(executor.scoped(|scope| {
         for load_target in themes_to_load {
             let theme_registry = &theme_registry;
             let reload_tasks = &reload_tasks;