Detailed changes
@@ -7348,6 +7348,7 @@ dependencies = [
"calloop",
"calloop-wayland-source",
"cbindgen",
+ "chrono",
"circular-buffer",
"cocoa 0.26.0",
"cocoa-foundation 0.2.0",
@@ -7397,6 +7398,7 @@ dependencies = [
"refineable",
"reqwest_client",
"resvg",
+ "scheduler",
"schemars",
"seahash",
"semver",
@@ -13597,6 +13599,7 @@ dependencies = [
"alacritty_terminal",
"anyhow",
"async-dispatcher",
+ "async-task",
"async-tungstenite",
"base64 0.22.1",
"client",
@@ -380,6 +380,7 @@ rodio = { git = "https://github.com/RustAudio/rodio", rev = "e2074c6c2acf07b57cf7
rope = { path = "crates/rope" }
rpc = { path = "crates/rpc" }
rules_library = { path = "crates/rules_library" }
+scheduler = { path = "crates/scheduler" }
search = { path = "crates/search" }
session = { path = "crates/session" }
settings = { path = "crates/settings" }
@@ -1337,9 +1337,10 @@ impl EvalAssertion {
}
fn run_eval(eval: EvalInput) -> eval_utils::EvalOutput<EditEvalMetadata> {
- let dispatcher = gpui::TestDispatcher::new(StdRng::from_os_rng());
+ let dispatcher = gpui::TestDispatcher::new(rand::random());
let mut cx = TestAppContext::build(dispatcher, None);
- let result = cx.executor().block_test(async {
+ let foreground_executor = cx.foreground_executor().clone();
+ let result = foreground_executor.block_test(async {
let test = EditAgentTest::new(&mut cx).await;
test.eval(eval, &mut cx).await
});
@@ -1501,7 +1501,7 @@ pub(crate) fn search_symbols(
});
const MAX_MATCHES: usize = 100;
- let mut visible_matches = cx.background_executor().block(fuzzy::match_strings(
+ let mut visible_matches = cx.foreground_executor().block_on(fuzzy::match_strings(
&visible_match_candidates,
&query,
false,
@@ -1510,7 +1510,7 @@ pub(crate) fn search_symbols(
&cancellation_flag,
cx.background_executor().clone(),
));
- let mut external_matches = cx.background_executor().block(fuzzy::match_strings(
+ let mut external_matches = cx.foreground_executor().block_on(fuzzy::match_strings(
&external_match_candidates,
&query,
false,
@@ -2102,9 +2102,9 @@ pub mod test {
cx.set_global(inline_assistant);
});
- let project = cx
- .executor()
- .block_test(async { Project::test(fs.clone(), [], cx).await });
+ let foreground_executor = cx.foreground_executor().clone();
+ let project =
+ foreground_executor.block_test(async { Project::test(fs.clone(), [], cx).await });
// Create workspace with window
let (workspace, cx) = cx.add_window_view(|window, cx| {
@@ -2162,8 +2162,7 @@ pub mod test {
test(cx);
- let assist_id = cx
- .executor()
+ let assist_id = foreground_executor
.block_test(async { completion_rx.next().await })
.unwrap()
.unwrap();
@@ -2206,7 +2205,6 @@ pub mod evals {
use eval_utils::{EvalOutput, NoProcessor};
use gpui::TestAppContext;
use language_model::{LanguageModelRegistry, SelectedModel};
- use rand::{SeedableRng as _, rngs::StdRng};
use crate::inline_assistant::test::{InlineAssistantOutput, run_inline_assistant_test};
@@ -2308,7 +2306,7 @@ pub mod evals {
let prompt = prompt.into();
eval_utils::eval(iterations, expected_pass_ratio, NoProcessor, move || {
- let dispatcher = gpui::TestDispatcher::new(StdRng::from_os_rng());
+ let dispatcher = gpui::TestDispatcher::new(rand::random());
let mut cx = TestAppContext::build(dispatcher, None);
cx.skip_drawing();
@@ -4,7 +4,8 @@ use agent_settings::AgentSettings;
use collections::{HashMap, HashSet, IndexMap};
use fuzzy::{StringMatch, StringMatchCandidate, match_strings};
use gpui::{
- Action, AnyElement, App, BackgroundExecutor, DismissEvent, FocusHandle, Subscription, Task,
+ Action, AnyElement, App, BackgroundExecutor, DismissEvent, FocusHandle, ForegroundExecutor,
+ Subscription, Task,
};
use language_model::{
AuthenticateError, ConfiguredModel, IconOrSvg, LanguageModel, LanguageModelId,
@@ -361,22 +362,28 @@ enum LanguageModelPickerEntry {
struct ModelMatcher {
models: Vec<ModelInfo>,
+ fg_executor: ForegroundExecutor,
bg_executor: BackgroundExecutor,
candidates: Vec<StringMatchCandidate>,
}
impl ModelMatcher {
- fn new(models: Vec<ModelInfo>, bg_executor: BackgroundExecutor) -> ModelMatcher {
+ fn new(
+ models: Vec<ModelInfo>,
+ fg_executor: ForegroundExecutor,
+ bg_executor: BackgroundExecutor,
+ ) -> ModelMatcher {
let candidates = Self::make_match_candidates(&models);
Self {
models,
+ fg_executor,
bg_executor,
candidates,
}
}
pub fn fuzzy_search(&self, query: &str) -> Vec<ModelInfo> {
- let mut matches = self.bg_executor.block(match_strings(
+ let mut matches = self.fg_executor.block_on(match_strings(
&self.candidates,
query,
false,
@@ -472,6 +479,7 @@ impl PickerDelegate for LanguageModelPickerDelegate {
) -> Task<()> {
let all_models = self.all_models.clone();
let active_model = (self.get_active_model)(cx);
+ let fg_executor = cx.foreground_executor();
let bg_executor = cx.background_executor();
let language_model_registry = LanguageModelRegistry::global(cx);
@@ -503,8 +511,10 @@ impl PickerDelegate for LanguageModelPickerDelegate {
.cloned()
.collect::<Vec<_>>();
- let matcher_rec = ModelMatcher::new(recommended_models, bg_executor.clone());
- let matcher_all = ModelMatcher::new(available_models, bg_executor.clone());
+ let matcher_rec =
+ ModelMatcher::new(recommended_models, fg_executor.clone(), bg_executor.clone());
+ let matcher_all =
+ ModelMatcher::new(available_models, fg_executor.clone(), bg_executor.clone());
let recommended = matcher_rec.exact_search(&query);
let all = matcher_all.fuzzy_search(&query);
@@ -749,7 +759,11 @@ mod tests {
("ollama", "mistral"),
("ollama", "deepseek"),
]);
- let matcher = ModelMatcher::new(models, cx.background_executor.clone());
+ let matcher = ModelMatcher::new(
+ models,
+ cx.foreground_executor().clone(),
+ cx.background_executor.clone(),
+ );
// The order of models should be maintained, case doesn't matter
let results = matcher.exact_search("GPT-4.1");
@@ -777,7 +791,11 @@ mod tests {
("ollama", "mistral"),
("ollama", "deepseek"),
]);
- let matcher = ModelMatcher::new(models, cx.background_executor.clone());
+ let matcher = ModelMatcher::new(
+ models,
+ cx.foreground_executor().clone(),
+ cx.background_executor.clone(),
+ );
// Results should preserve models order whenever possible.
// In the case below, `zed/gpt-4.1` and `openai/gpt-4.1` have identical
@@ -6,7 +6,7 @@ use fs::Fs;
use fuzzy::{StringMatch, StringMatchCandidate, match_strings};
use gpui::{
Action, AnyElement, App, BackgroundExecutor, Context, DismissEvent, Entity, FocusHandle,
- Focusable, SharedString, Subscription, Task, Window,
+ Focusable, ForegroundExecutor, SharedString, Subscription, Task, Window,
};
use picker::{Picker, PickerDelegate, popover_menu::PickerPopoverMenu};
use settings::{Settings as _, SettingsStore, update_settings_file};
@@ -103,6 +103,7 @@ impl ProfileSelector {
self.fs.clone(),
self.provider.clone(),
self.profiles.clone(),
+ cx.foreground_executor().clone(),
cx.background_executor().clone(),
self.focus_handle.clone(),
cx,
@@ -239,6 +240,7 @@ enum ProfilePickerEntry {
pub(crate) struct ProfilePickerDelegate {
fs: Arc<dyn Fs>,
provider: Arc<dyn ProfileProvider>,
+ foreground: ForegroundExecutor,
background: BackgroundExecutor,
candidates: Vec<ProfileCandidate>,
string_candidates: Arc<Vec<StringMatchCandidate>>,
@@ -255,6 +257,7 @@ impl ProfilePickerDelegate {
fs: Arc<dyn Fs>,
provider: Arc<dyn ProfileProvider>,
profiles: AvailableProfiles,
+ foreground: ForegroundExecutor,
background: BackgroundExecutor,
focus_handle: FocusHandle,
cx: &mut Context<ProfileSelector>,
@@ -266,6 +269,7 @@ impl ProfilePickerDelegate {
let mut this = Self {
fs,
provider,
+ foreground,
background,
candidates,
string_candidates,
@@ -401,7 +405,7 @@ impl ProfilePickerDelegate {
let cancel_flag = AtomicBool::new(false);
- self.background.block(match_strings(
+ self.foreground.block_on(match_strings(
self.string_candidates.as_ref(),
query,
false,
@@ -734,6 +738,7 @@ mod tests {
let delegate = ProfilePickerDelegate {
fs: FakeFs::new(cx.background_executor().clone()),
provider: Arc::new(TestProfileProvider::new(AgentProfileId("write".into()))),
+ foreground: cx.foreground_executor().clone(),
background: cx.background_executor().clone(),
candidates,
string_candidates: Arc::new(Vec::new()),
@@ -771,6 +776,7 @@ mod tests {
let delegate = ProfilePickerDelegate {
fs: FakeFs::new(cx.background_executor().clone()),
provider: Arc::new(TestProfileProvider::new(AgentProfileId("write".into()))),
+ foreground: cx.foreground_executor().clone(),
background: cx.background_executor().clone(),
candidates,
string_candidates: Arc::new(Vec::new()),
@@ -1,22 +1,16 @@
use futures::channel::oneshot;
use git2::{DiffLineType as GitDiffLineType, DiffOptions as GitOptions, Patch as GitPatch};
-use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Task, TaskLabel};
+use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Task};
use language::{
BufferRow, Capability, DiffOptions, File, Language, LanguageName, LanguageRegistry,
language_settings::language_settings, word_diff_ranges,
};
use rope::Rope;
-use std::{
- cmp::Ordering,
- iter,
- ops::Range,
- sync::{Arc, LazyLock},
-};
+use std::{cmp::Ordering, future::Future, iter, ops::Range, sync::Arc};
use sum_tree::SumTree;
use text::{Anchor, Bias, BufferId, OffsetRangeExt, Point, ToOffset as _, ToPoint as _};
use util::ResultExt;
-pub static CALCULATE_DIFF_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
pub const MAX_WORD_DIFF_LINE_COUNT: usize = 5;
pub struct BufferDiff {
@@ -1138,10 +1132,9 @@ impl BufferDiff {
cx: &mut Context<Self>,
) -> Self {
let mut this = BufferDiff::new(&buffer, cx);
- let executor = cx.background_executor().clone();
let mut base_text = base_text.to_owned();
text::LineEnding::normalize(&mut base_text);
- let inner = executor.block(this.update_diff(
+ let inner = cx.foreground_executor().block_on(this.update_diff(
buffer.clone(),
Some(Arc::from(base_text)),
true,
@@ -1254,37 +1247,36 @@ impl BufferDiff {
cx,
);
- cx.background_executor()
- .spawn_labeled(*CALCULATE_DIFF_TASK, async move {
- let base_text_rope = if let Some(base_text) = &base_text {
- if base_text_changed {
- Rope::from(base_text.as_ref())
- } else {
- prev_base_text
- }
+ cx.background_executor().spawn(async move {
+ let base_text_rope = if let Some(base_text) = &base_text {
+ if base_text_changed {
+ Rope::from(base_text.as_ref())
} else {
- Rope::new()
- };
- let base_text_exists = base_text.is_some();
- let hunks = compute_hunks(
- base_text
- .clone()
- .map(|base_text| (base_text, base_text_rope.clone())),
- buffer.clone(),
- diff_options,
- );
- let base_text = base_text.unwrap_or_default();
- let inner = BufferDiffInner {
- base_text,
- hunks,
- base_text_exists,
- pending_hunks: SumTree::new(&buffer),
- };
- BufferDiffUpdate {
- inner,
- base_text_changed,
+ prev_base_text
}
- })
+ } else {
+ Rope::new()
+ };
+ let base_text_exists = base_text.is_some();
+ let hunks = compute_hunks(
+ base_text
+ .clone()
+ .map(|base_text| (base_text, base_text_rope.clone())),
+ buffer.clone(),
+ diff_options,
+ );
+ let base_text = base_text.unwrap_or_default();
+ let inner = BufferDiffInner {
+ base_text,
+ hunks,
+ base_text_exists,
+ pending_hunks: SumTree::new(&buffer),
+ };
+ BufferDiffUpdate {
+ inner,
+ base_text_changed,
+ }
+ })
}
pub fn language_changed(
@@ -1503,10 +1495,10 @@ impl BufferDiff {
let language = self.base_text(cx).language().cloned();
let base_text = self.base_text_string(cx).map(|s| s.as_str().into());
let fut = self.update_diff(buffer.clone(), base_text, false, language, cx);
- let executor = cx.background_executor().clone();
- let snapshot = executor.block(fut);
+ let fg_executor = cx.foreground_executor().clone();
+ let snapshot = fg_executor.block_on(fut);
let fut = self.set_snapshot_with_secondary_inner(snapshot, buffer, None, false, cx);
- let (changed_range, base_text_changed_range) = executor.block(fut);
+ let (changed_range, base_text_changed_range) = fg_executor.block_on(fut);
cx.emit(BufferDiffEvent::DiffChanged {
changed_range,
base_text_changed_range,
@@ -3,7 +3,7 @@ use anyhow::{Context as _, Result, anyhow};
use cloud_api_client::{AuthenticatedUser, GetAuthenticatedUserResponse, PlanInfo};
use cloud_llm_client::{CurrentUsage, PlanV1, UsageData, UsageLimit};
use futures::{StreamExt, stream::BoxStream};
-use gpui::{AppContext as _, BackgroundExecutor, Entity, TestAppContext};
+use gpui::{AppContext as _, Entity, TestAppContext};
use http_client::{AsyncBody, Method, Request, http};
use parking_lot::Mutex;
use rpc::{ConnectionId, Peer, Receipt, TypedEnvelope, proto};
@@ -13,7 +13,6 @@ pub struct FakeServer {
peer: Arc<Peer>,
state: Arc<Mutex<FakeServerState>>,
user_id: u64,
- executor: BackgroundExecutor,
}
#[derive(Default)]
@@ -35,7 +34,6 @@ impl FakeServer {
peer: Peer::new(0),
state: Default::default(),
user_id: client_user_id,
- executor: cx.executor(),
};
client.http_client().as_fake().replace_handler({
@@ -181,8 +179,6 @@ impl FakeServer {
#[allow(clippy::await_holding_lock)]
pub async fn receive<M: proto::EnvelopedMessage>(&self) -> Result<TypedEnvelope<M>> {
- self.executor.start_waiting();
-
let message = self
.state
.lock()
@@ -192,7 +188,6 @@ impl FakeServer {
.next()
.await
.context("other half hung up")?;
- self.executor.finish_waiting();
let type_name = message.payload_type_name();
let message = message.into_any();
@@ -250,8 +250,6 @@ impl Database {
{
#[cfg(test)]
{
- use rand::prelude::*;
-
let test_options = self.test_options.as_ref().unwrap();
test_options.executor.simulate_random_delay().await;
let fail_probability = *test_options.query_failure_probability.lock();
@@ -254,7 +254,6 @@ async fn test_channel_notes_participant_indices(
let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b);
// Clients A and B open the same file.
- executor.start_waiting();
let editor_a = workspace_a
.update_in(cx_a, |workspace, window, cx| {
workspace.open_path(
@@ -269,7 +268,6 @@ async fn test_channel_notes_participant_indices(
.unwrap()
.downcast::<Editor>()
.unwrap();
- executor.start_waiting();
let editor_b = workspace_b
.update_in(cx_b, |workspace, window, cx| {
workspace.open_path(
@@ -67,7 +67,7 @@ async fn test_host_disconnect(
client_a
.fs()
.insert_tree(
- "/a",
+ path!("/a"),
json!({
"a.txt": "a-contents",
"b.txt": "b-contents",
@@ -76,7 +76,7 @@ async fn test_host_disconnect(
.await;
let active_call_a = cx_a.read(ActiveCall::global);
- let (project_a, worktree_id) = client_a.build_local_project("/a", cx_a).await;
+ let (project_a, worktree_id) = client_a.build_local_project(path!("/a"), cx_a).await;
let worktree_a = project_a.read_with(cx_a, |project, cx| project.worktrees(cx).next().unwrap());
let project_id = active_call_a
@@ -153,7 +153,7 @@ async fn test_host_disconnect(
// Allow client A to reconnect to the server.
server.allow_connections();
- cx_a.background_executor.advance_clock(RECEIVE_TIMEOUT);
+ cx_a.background_executor.advance_clock(RECONNECT_TIMEOUT);
// Client B calls client A again after they reconnected.
let active_call_b = cx_b.read(ActiveCall::global);
@@ -429,6 +429,51 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu
assert!(!buffer.completion_triggers().is_empty())
});
+ // Set up the completion request handlers BEFORE typing the trigger character.
+ // This is critical - the handlers must be in place when the request arrives,
+ // otherwise the requests will time out waiting for a response.
+ let mut first_completion_request = fake_language_server
+ .set_request_handler::<lsp::request::Completion, _, _>(|params, _| async move {
+ assert_eq!(
+ params.text_document_position.text_document.uri,
+ lsp::Uri::from_file_path(path!("/a/main.rs")).unwrap(),
+ );
+ assert_eq!(
+ params.text_document_position.position,
+ lsp::Position::new(0, 14),
+ );
+
+ Ok(Some(lsp::CompletionResponse::Array(vec![
+ lsp::CompletionItem {
+ label: "first_method(β¦)".into(),
+ detail: Some("fn(&mut self, B) -> C".into()),
+ text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
+ new_text: "first_method($1)".to_string(),
+ range: lsp::Range::new(
+ lsp::Position::new(0, 14),
+ lsp::Position::new(0, 14),
+ ),
+ })),
+ insert_text_format: Some(lsp::InsertTextFormat::SNIPPET),
+ ..Default::default()
+ },
+ lsp::CompletionItem {
+ label: "second_method(β¦)".into(),
+ detail: Some("fn(&mut self, C) -> D<E>".into()),
+ text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit {
+ new_text: "second_method()".to_string(),
+ range: lsp::Range::new(
+ lsp::Position::new(0, 14),
+ lsp::Position::new(0, 14),
+ ),
+ })),
+ insert_text_format: Some(lsp::InsertTextFormat::SNIPPET),
+ ..Default::default()
+ },
+ ])))
+ });
+ let mut second_completion_request = second_fake_language_server
+ .set_request_handler::<lsp::request::Completion, _, _>(|_, _| async move { Ok(None) });
// Type a completion trigger character as the guest.
editor_b.update_in(cx_b, |editor, window, cx| {
editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
@@ -442,6 +487,10 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu
cx_b.background_executor.run_until_parked();
cx_a.background_executor.run_until_parked();
+ // Wait for the completion requests to be received by the fake language servers.
+ first_completion_request.next().await.unwrap();
+ second_completion_request.next().await.unwrap();
+
// Open the buffer on the host.
let buffer_a = project_a
.update(cx_a, |p, cx| {
@@ -1373,6 +1422,7 @@ async fn test_language_server_statuses(cx_a: &mut TestAppContext, cx_b: &mut Tes
.unwrap();
let fake_language_server = fake_language_servers.next().await.unwrap();
+ executor.run_until_parked();
fake_language_server.start_progress("the-token").await;
executor.advance_clock(SERVER_PROGRESS_THROTTLE_TIMEOUT);
@@ -1842,7 +1892,6 @@ async fn test_on_input_format_from_guest_to_host(
// Receive an OnTypeFormatting request as the host's language server.
// Return some formatting from the host's language server.
- executor.start_waiting();
fake_language_server
.set_request_handler::<lsp::request::OnTypeFormatting, _, _>(|params, _| async move {
assert_eq!(
@@ -1862,7 +1911,6 @@ async fn test_on_input_format_from_guest_to_host(
.next()
.await
.unwrap();
- executor.finish_waiting();
// Open the buffer on the host and see that the formatting worked
let buffer_a = project_a
@@ -2238,8 +2286,6 @@ async fn test_inlay_hint_refresh_is_forwarded(
let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a);
let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b);
- cx_a.background_executor.start_waiting();
-
let editor_a = workspace_a
.update_in(cx_a, |workspace, window, cx| {
workspace.open_path((worktree_id, rel_path("main.rs")), None, true, window, cx)
@@ -2303,7 +2349,6 @@ async fn test_inlay_hint_refresh_is_forwarded(
.next()
.await
.unwrap();
- executor.finish_waiting();
executor.run_until_parked();
editor_a.update(cx_a, |editor, cx| {
@@ -2915,7 +2960,6 @@ async fn test_lsp_pull_diagnostics(
.unwrap();
let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a);
- executor.start_waiting();
// The host opens a rust file.
let _buffer_a = project_a
@@ -2051,6 +2051,9 @@ async fn test_following_to_channel_notes_without_a_shared_project(
});
});
+ // Ensure client A's edits are synced to the server before client B starts following.
+ deterministic.run_until_parked();
+
// Client B follows client A.
workspace_b
.update_in(cx_b, |workspace, window, cx| {
@@ -4358,6 +4358,7 @@ async fn test_collaborating_with_lsp_progress_updates_and_diagnostics_ordering(
// Simulate a language server reporting errors for a file.
let fake_language_server = fake_language_servers.next().await.unwrap();
+ executor.run_until_parked();
fake_language_server
.request::<lsp::request::WorkDoneProgressCreate>(lsp::WorkDoneProgressCreateParams {
token: lsp::NumberOrString::String("the-disk-based-token".to_string()),
@@ -4570,6 +4571,7 @@ async fn test_formatting_buffer(
project.register_buffer_with_language_servers(&buffer_b, cx)
});
let fake_language_server = fake_language_servers.next().await.unwrap();
+ executor.run_until_parked();
fake_language_server.set_request_handler::<lsp::request::Formatting, _, _>(|_, _| async move {
Ok(Some(vec![
lsp::TextEdit {
@@ -5630,6 +5632,7 @@ async fn test_project_symbols(
.unwrap();
let fake_language_server = fake_language_servers.next().await.unwrap();
+ executor.run_until_parked();
fake_language_server.set_request_handler::<lsp::WorkspaceSymbolRequest, _, _>(
|_, _| async move {
Ok(Some(lsp::WorkspaceSymbolResponse::Flat(vec![
@@ -1110,7 +1110,8 @@ impl RandomizedTest for ProjectCollaborationTest {
let fs = fs.clone();
move |_, cx| {
let background = cx.background_executor();
- let mut rng = background.rng();
+ let rng = background.rng();
+ let mut rng = rng.lock();
let count = rng.random_range::<usize, _>(1..3);
let files = fs.as_fake().files();
let files = (0..count)
@@ -1136,7 +1137,8 @@ impl RandomizedTest for ProjectCollaborationTest {
move |_, cx| {
let mut highlights = Vec::new();
let background = cx.background_executor();
- let mut rng = background.rng();
+ let rng = background.rng();
+ let mut rng = rng.lock();
let highlight_count = rng.random_range(1..=5);
for _ in 0..highlight_count {
@@ -174,9 +174,7 @@ pub async fn run_randomized_test<T: RandomizedTest>(
}
drop(operation_channels);
- executor.start_waiting();
futures::future::join_all(client_tasks).await;
- executor.finish_waiting();
executor.run_until_parked();
T::on_quiesce(&mut server, &mut clients).await;
@@ -524,10 +522,8 @@ impl<T: RandomizedTest> TestPlan<T> {
server.forbid_connections();
server.disconnect_client(removed_peer_id);
deterministic.advance_clock(RECEIVE_TIMEOUT + RECONNECT_TIMEOUT);
- deterministic.start_waiting();
log::info!("waiting for user {} to exit...", removed_user_id);
client_task.await;
- deterministic.finish_waiting();
server.allow_connections();
for project in client.dev_server_projects().iter() {
@@ -488,6 +488,7 @@ impl CollabPanel {
let channel_store = self.channel_store.read(cx);
let user_store = self.user_store.read(cx);
let query = self.filter_editor.read(cx).text(cx);
+ let fg_executor = cx.foreground_executor();
let executor = cx.background_executor().clone();
let prev_selected_entry = self.selection.and_then(|ix| self.entries.get(ix).cloned());
@@ -517,7 +518,7 @@ impl CollabPanel {
self.match_candidates.clear();
self.match_candidates
.push(StringMatchCandidate::new(0, &user.github_login));
- let matches = executor.block(match_strings(
+ let matches = fg_executor.block_on(match_strings(
&self.match_candidates,
&query,
true,
@@ -561,7 +562,7 @@ impl CollabPanel {
&participant.user.github_login,
)
}));
- let mut matches = executor.block(match_strings(
+ let mut matches = fg_executor.block_on(match_strings(
&self.match_candidates,
&query,
true,
@@ -613,7 +614,7 @@ impl CollabPanel {
StringMatchCandidate::new(id, &participant.github_login)
},
));
- let matches = executor.block(match_strings(
+ let matches = fg_executor.block_on(match_strings(
&self.match_candidates,
&query,
true,
@@ -648,7 +649,7 @@ impl CollabPanel {
.ordered_channels()
.map(|(_, chan)| chan)
.collect::<Vec<_>>();
- let matches = executor.block(match_strings(
+ let matches = fg_executor.block_on(match_strings(
&self.match_candidates,
&query,
true,
@@ -750,7 +751,7 @@ impl CollabPanel {
.enumerate()
.map(|(ix, channel)| StringMatchCandidate::new(ix, &channel.name)),
);
- let matches = executor.block(match_strings(
+ let matches = fg_executor.block_on(match_strings(
&self.match_candidates,
&query,
true,
@@ -786,7 +787,7 @@ impl CollabPanel {
.enumerate()
.map(|(ix, user)| StringMatchCandidate::new(ix, &user.github_login)),
);
- let matches = executor.block(match_strings(
+ let matches = fg_executor.block_on(match_strings(
&self.match_candidates,
&query,
true,
@@ -811,7 +812,7 @@ impl CollabPanel {
.enumerate()
.map(|(ix, user)| StringMatchCandidate::new(ix, &user.github_login)),
);
- let matches = executor.block(match_strings(
+ let matches = fg_executor.block_on(match_strings(
&self.match_candidates,
&query,
true,
@@ -845,14 +846,14 @@ impl CollabPanel {
.map(|(ix, contact)| StringMatchCandidate::new(ix, &contact.user.github_login)),
);
- let matches = executor.block(match_strings(
+ let matches = fg_executor.block_on(match_strings(
&self.match_candidates,
&query,
true,
true,
usize::MAX,
&Default::default(),
- executor.clone(),
+ executor,
));
let (online_contacts, offline_contacts) = matches
@@ -297,7 +297,7 @@ impl PickerDelegate for ChannelModalDelegate {
StringMatchCandidate::new(id, &member.user.github_login)
}));
- let matches = cx.background_executor().block(match_strings(
+ let matches = cx.foreground_executor().block_on(match_strings(
&self.match_candidates,
&query,
true,
@@ -526,7 +526,7 @@ impl PickerDelegate for CommandPaletteDelegate {
};
match cx
- .background_executor()
+ .foreground_executor()
.block_with_timeout(duration, rx.clone().recv())
{
Ok(Some((commands, matches, interceptor_result))) => {
@@ -53,7 +53,7 @@ pub fn run_component_preview() {
let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
let workspace_store = cx.new(|cx| WorkspaceStore::new(client.clone(), cx));
let session_id = uuid::Uuid::new_v4().to_string();
- let session = cx.background_executor().block(Session::new(session_id));
+ let session = cx.foreground_executor().block_on(Session::new(session_id));
let session = cx.new(|cx| AppSession::new(session, cx));
let node_runtime = NodeRuntime::unavailable();
@@ -9,8 +9,7 @@ use text::Bias;
use util::RandomCharIter;
fn to_tab_point_benchmark(c: &mut Criterion) {
- let rng = StdRng::seed_from_u64(1);
- let dispatcher = TestDispatcher::new(rng);
+ let dispatcher = TestDispatcher::new(1);
let cx = gpui::TestAppContext::build(dispatcher, None);
let create_tab_map = |length: usize| {
@@ -55,8 +54,7 @@ fn to_tab_point_benchmark(c: &mut Criterion) {
}
fn to_fold_point_benchmark(c: &mut Criterion) {
- let rng = StdRng::seed_from_u64(1);
- let dispatcher = TestDispatcher::new(rng);
+ let dispatcher = TestDispatcher::new(1);
let cx = gpui::TestAppContext::build(dispatcher, None);
let create_tab_map = |length: usize| {
@@ -116,7 +116,7 @@ fn editor_render(bencher: &mut Bencher<'_>, cx: &TestAppContext) {
}
pub fn benches() {
- let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(1));
+ let dispatcher = TestDispatcher::new(1);
let cx = gpui::TestAppContext::build(dispatcher, None);
cx.update(|cx| {
let store = SettingsStore::test(cx);
@@ -212,7 +212,7 @@ impl WrapMap {
});
match cx
- .background_executor()
+ .foreground_executor()
.block_with_timeout(Duration::from_millis(5), task)
{
Ok((snapshot, edits)) => {
@@ -292,7 +292,7 @@ impl WrapMap {
});
match cx
- .background_executor()
+ .foreground_executor()
.block_with_timeout(Duration::from_millis(1), update_task)
{
Ok((snapshot, output_edits)) => {
@@ -11815,7 +11815,6 @@ async fn test_document_format_during_save(cx: &mut TestAppContext) {
});
assert!(cx.read(|cx| editor.is_dirty(cx)));
- cx.executor().start_waiting();
let fake_server = fake_servers.next().await.unwrap();
{
@@ -11845,7 +11844,6 @@ async fn test_document_format_during_save(cx: &mut TestAppContext) {
)
})
.unwrap();
- cx.executor().start_waiting();
save.await;
assert_eq!(
@@ -11886,7 +11884,6 @@ async fn test_document_format_during_save(cx: &mut TestAppContext) {
})
.unwrap();
cx.executor().advance_clock(super::FORMAT_TIMEOUT);
- cx.executor().start_waiting();
save.await;
assert_eq!(
editor.update(cx, |editor, cx| editor.text(cx)),
@@ -11932,7 +11929,6 @@ async fn test_document_format_during_save(cx: &mut TestAppContext) {
)
})
.unwrap();
- cx.executor().start_waiting();
save.await;
}
}
@@ -11999,7 +11995,6 @@ async fn test_redo_after_noop_format(cx: &mut TestAppContext) {
)
})
.unwrap();
- cx.executor().start_waiting();
save.await;
assert!(!cx.read(|cx| editor.is_dirty(cx)));
}
@@ -12168,7 +12163,6 @@ async fn test_multibuffer_format_during_save(cx: &mut TestAppContext) {
});
cx.executor().run_until_parked();
- cx.executor().start_waiting();
let save = multi_buffer_editor
.update_in(cx, |editor, window, cx| {
editor.save(
@@ -12430,7 +12424,6 @@ async fn setup_range_format_test(
build_editor_with_project(project.clone(), buffer, window, cx)
});
- cx.executor().start_waiting();
let fake_server = fake_servers.next().await.unwrap();
(project, editor, cx, fake_server)
@@ -12472,7 +12465,6 @@ async fn test_range_format_on_save_success(cx: &mut TestAppContext) {
})
.next()
.await;
- cx.executor().start_waiting();
save.await;
assert_eq!(
editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12515,7 +12507,6 @@ async fn test_range_format_on_save_timeout(cx: &mut TestAppContext) {
})
.unwrap();
cx.executor().advance_clock(super::FORMAT_TIMEOUT);
- cx.executor().start_waiting();
save.await;
assert_eq!(
editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12547,7 +12538,6 @@ async fn test_range_format_not_called_for_clean_buffer(cx: &mut TestAppContext)
panic!("Should not be invoked");
})
.next();
- cx.executor().start_waiting();
save.await;
cx.run_until_parked();
}
@@ -12654,7 +12644,6 @@ async fn test_document_format_manual_trigger(cx: &mut TestAppContext) {
editor.set_text("one\ntwo\nthree\n", window, cx)
});
- cx.executor().start_waiting();
let fake_server = fake_servers.next().await.unwrap();
let format = editor
@@ -12682,7 +12671,6 @@ async fn test_document_format_manual_trigger(cx: &mut TestAppContext) {
})
.next()
.await;
- cx.executor().start_waiting();
format.await;
assert_eq!(
editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12715,7 +12703,6 @@ async fn test_document_format_manual_trigger(cx: &mut TestAppContext) {
})
.unwrap();
cx.executor().advance_clock(super::FORMAT_TIMEOUT);
- cx.executor().start_waiting();
format.await;
assert_eq!(
editor.update(cx, |editor, cx| editor.text(cx)),
@@ -12770,8 +12757,6 @@ async fn test_multiple_formatters(cx: &mut TestAppContext) {
build_editor_with_project(project.clone(), buffer, window, cx)
});
- cx.executor().start_waiting();
-
let fake_server = fake_servers.next().await.unwrap();
fake_server.set_request_handler::<lsp::request::Formatting, _, _>(
move |_params, _| async move {
@@ -12873,7 +12858,6 @@ async fn test_multiple_formatters(cx: &mut TestAppContext) {
}
});
- cx.executor().start_waiting();
editor
.update_in(cx, |editor, window, cx| {
editor.perform_format(
@@ -13041,7 +13025,6 @@ async fn test_organize_imports_manual_trigger(cx: &mut TestAppContext) {
)
});
- cx.executor().start_waiting();
let fake_server = fake_servers.next().await.unwrap();
let format = editor
@@ -13087,7 +13070,6 @@ async fn test_organize_imports_manual_trigger(cx: &mut TestAppContext) {
})
.next()
.await;
- cx.executor().start_waiting();
format.await;
assert_eq!(
editor.update(cx, |editor, cx| editor.text(cx)),
@@ -13123,7 +13105,6 @@ async fn test_organize_imports_manual_trigger(cx: &mut TestAppContext) {
})
.unwrap();
cx.executor().advance_clock(super::CODE_ACTION_TIMEOUT);
- cx.executor().start_waiting();
format.await;
assert_eq!(
editor.update(cx, |editor, cx| editor.text(cx)),
@@ -13176,9 +13157,7 @@ async fn test_concurrent_format_requests(cx: &mut TestAppContext) {
// Wait for both format requests to complete
cx.executor().advance_clock(Duration::from_millis(200));
- cx.executor().start_waiting();
format_1.await.unwrap();
- cx.executor().start_waiting();
format_2.await.unwrap();
// The formatting edits only happens once.
@@ -14698,6 +14677,7 @@ async fn test_completion_in_multibuffer_with_replace_range(cx: &mut TestAppConte
});
let fake_server = fake_servers.next().await.unwrap();
+ cx.run_until_parked();
editor.update_in(cx, |editor, window, cx| {
editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
@@ -15067,6 +15047,7 @@ async fn test_completion_can_run_commands(cx: &mut TestAppContext) {
.downcast::<Editor>()
.unwrap();
let _fake_server = fake_servers.next().await.unwrap();
+ cx.run_until_parked();
editor.update_in(cx, |editor, window, cx| {
cx.focus_self(window);
@@ -15802,6 +15783,7 @@ async fn test_multiline_completion(cx: &mut TestAppContext) {
.downcast::<Editor>()
.unwrap();
let fake_server = fake_servers.next().await.unwrap();
+ cx.run_until_parked();
let multiline_label = "StickyHeaderExcerpt {\n excerpt,\n next_excerpt_controls_present,\n next_buffer_row,\n }: StickyHeaderExcerpt<'_>,";
let multiline_label_2 = "a\nb\nc\n";
@@ -18175,7 +18157,6 @@ async fn test_on_type_formatting_not_triggered(cx: &mut TestAppContext) {
.downcast::<Editor>()
.unwrap();
- cx.executor().start_waiting();
let fake_server = fake_servers.next().await.unwrap();
fake_server.set_request_handler::<lsp::request::OnTypeFormatting, _, _>(
@@ -25373,6 +25354,7 @@ async fn test_html_linked_edits_on_completion(cx: &mut TestAppContext) {
.unwrap();
let fake_server = fake_servers.next().await.unwrap();
+ cx.run_until_parked();
editor.update_in(cx, |editor, window, cx| {
editor.set_text("<ad></ad>", window, cx);
editor.change_selections(SelectionEffects::no_scroll(), window, cx, |selections| {
@@ -106,7 +106,7 @@ impl Editor {
// Try to resolve the indent in a short amount of time, otherwise move it to a background task.
match cx
- .background_executor()
+ .foreground_executor()
.block_with_timeout(Duration::from_micros(200), task)
{
Ok(result) => state.active_indent_range = result,
@@ -292,6 +292,7 @@ impl Editor {
};
let mut visible_excerpts = self.visible_excerpts(true, cx);
+
let mut invalidate_hints_for_buffers = HashSet::default();
let ignore_previous_fetches = match reason {
InlayHintRefreshReason::ModifiersChanged(_)
@@ -348,6 +349,7 @@ impl Editor {
let mut buffers_to_query = HashMap::default();
for (_, (buffer, buffer_version, visible_range)) in visible_excerpts {
let buffer_id = buffer.read(cx).remote_id();
+
if !self.registered_buffers.contains_key(&buffer_id) {
continue;
}
@@ -3656,35 +3658,49 @@ let c = 3;"#
})
.await
.unwrap();
- let editor =
- cx.add_window(|window, cx| Editor::for_buffer(buffer, Some(project), window, cx));
+ // Use a VisualTestContext and explicitly establish a viewport on the editor (the production
+ // trigger for `NewLinesShown` / inlay hint refresh) by setting visible line/column counts.
+ let (editor_entity, cx) =
+ cx.add_window_view(|window, cx| Editor::for_buffer(buffer, Some(project), window, cx));
+
+ editor_entity.update_in(cx, |editor, window, cx| {
+ // Establish a viewport. The exact values are not important for this test; we just need
+ // the editor to consider itself visible so the refresh pipeline runs.
+ editor.set_visible_line_count(50.0, window, cx);
+ editor.set_visible_column_count(120.0);
+
+ // Explicitly trigger a refresh now that the viewport exists.
+ editor.refresh_inlay_hints(InlayHintRefreshReason::NewLinesShown, cx);
+ });
cx.executor().run_until_parked();
- editor
- .update(cx, |editor, window, cx| {
- editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
- s.select_ranges([Point::new(10, 0)..Point::new(10, 0)])
- })
- })
- .unwrap();
+
+ editor_entity.update_in(cx, |editor, window, cx| {
+ editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
+ s.select_ranges([Point::new(10, 0)..Point::new(10, 0)])
+ });
+ });
cx.executor().run_until_parked();
- editor
- .update(cx, |editor, _window, cx| {
- let expected_hints = vec![
- "move".to_string(),
- "(".to_string(),
- "&x".to_string(),
- ") ".to_string(),
- ") ".to_string(),
- ];
- assert_eq!(
- expected_hints,
- cached_hint_labels(editor, cx),
- "Editor inlay hints should repeat server's order when placed at the same spot"
- );
- assert_eq!(expected_hints, visible_hint_labels(editor, cx));
- })
- .unwrap();
+
+ // Allow any async inlay hint request/response work to complete.
+ cx.executor().advance_clock(Duration::from_millis(100));
+ cx.executor().run_until_parked();
+
+ editor_entity.update(cx, |editor, cx| {
+ let expected_hints = vec![
+ "move".to_string(),
+ "(".to_string(),
+ "&x".to_string(),
+ ") ".to_string(),
+ ") ".to_string(),
+ ];
+ assert_eq!(
+ expected_hints,
+ cached_hint_labels(editor, cx),
+ "Editor inlay hints should repeat server's order when placed at the same spot"
+ );
+ assert_eq!(expected_hints, visible_hint_labels(editor, cx));
+ });
}
#[gpui::test]
@@ -130,6 +130,10 @@ impl EditorLspTestContext {
});
let lsp = fake_servers.next().await.unwrap();
+
+ // Ensure the language server is fully registered with the buffer
+ cx.executor().run_until_parked();
+
Self {
cx: EditorTestContext {
cx,
@@ -11,7 +11,7 @@ use fs::{Fs, RealFs};
use gpui::{TestAppContext, TestDispatcher};
use http_client::{FakeHttpClient, Response};
use node_runtime::NodeRuntime;
-use rand::{SeedableRng, rngs::StdRng};
+
use reqwest_client::ReqwestClient;
use serde_json::json;
use settings::SettingsStore;
@@ -41,8 +41,8 @@ fn extension_benchmarks(c: &mut Criterion) {
|| wasm_bytes.clone(),
|wasm_bytes| {
let _extension = cx
- .executor()
- .block(wasm_host.load_extension(wasm_bytes, &manifest, &cx.to_async()))
+ .foreground_executor()
+ .block_on(wasm_host.load_extension(wasm_bytes, &manifest, &cx.to_async()))
.unwrap();
},
BatchSize::SmallInput,
@@ -52,7 +52,7 @@ fn extension_benchmarks(c: &mut Criterion) {
fn init() -> TestAppContext {
const SEED: u64 = 9999;
- let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(SEED));
+ let dispatcher = TestDispatcher::new(SEED);
let cx = TestAppContext::build(dispatcher, None);
cx.executor().allow_parking();
cx.update(|cx| {
@@ -72,8 +72,8 @@ fn wasm_bytes(cx: &TestAppContext, manifest: &mut ExtensionManifest, fs: Arc<dyn
.parent()
.unwrap()
.join("extensions/test-extension");
- cx.executor()
- .block(extension_builder.compile_extension(
+ cx.foreground_executor()
+ .block_on(extension_builder.compile_extension(
&path,
manifest,
CompileExtensionOptions { release: true },
@@ -282,7 +282,7 @@ impl ExtensionStore {
// list of the installed extensions and the resources that they provide.
// This index is loaded synchronously on startup.
let (index_content, index_metadata, extensions_metadata) =
- cx.background_executor().block(async {
+ cx.foreground_executor().block_on(async {
futures::join!(
this.fs.load(&this.index_path),
this.fs.metadata(&this.index_path),
@@ -336,6 +336,38 @@ impl ExtensionStore {
let mut index_changed = false;
let mut debounce_timer = cx.background_spawn(futures::future::pending()).fuse();
+
+ // If a test enables parking, it typically intends to allow real I/O progress while
+ // still using the scheduler-backed fake clock for deterministic execution.
+ // In that mode, debounce timers must use real time so that reloads can complete
+ // without requiring explicit fake-clock advancement.
+ #[cfg(any(test, feature = "test-support"))]
+ let use_real_time_debounce = cx
+ .background_executor()
+ .dispatcher()
+ .as_test()
+ .map(|test_dispatcher| test_dispatcher.scheduler().parking_allowed())
+ .unwrap_or(false);
+
+ #[cfg(not(any(test, feature = "test-support")))]
+ let use_real_time_debounce = false;
+
+ fn schedule_debounce(
+ use_real_time_debounce: bool,
+ cx: &mut gpui::AsyncApp,
+ ) -> futures::future::Fuse<gpui::Task<()>> {
+ if use_real_time_debounce {
+ cx.background_spawn(async move {
+ gpui::Timer::after(RELOAD_DEBOUNCE_DURATION).await;
+ })
+ .fuse()
+ } else {
+ cx.background_executor()
+ .timer(RELOAD_DEBOUNCE_DURATION)
+ .fuse()
+ }
+ }
+
loop {
select_biased! {
_ = debounce_timer => {
@@ -351,21 +383,15 @@ impl ExtensionStore {
Self::update_remote_clients(&this, cx).await?;
}
_ = connection_registered_rx.next() => {
- debounce_timer = cx
- .background_executor()
- .timer(RELOAD_DEBOUNCE_DURATION)
- .fuse();
+ debounce_timer = schedule_debounce(use_real_time_debounce, cx);
}
extension_id = reload_rx.next() => {
let Some(extension_id) = extension_id else { break; };
- this.update(cx, |this, _| {
+ this.update(cx, |this, _cx| {
this.modified_extensions.extend(extension_id);
})?;
index_changed = true;
- debounce_timer = cx
- .background_executor()
- .timer(RELOAD_DEBOUNCE_DURATION)
- .fuse();
+ debounce_timer = schedule_debounce(use_real_time_debounce, cx);
}
}
}
@@ -534,10 +534,27 @@ async fn test_extension_store(cx: &mut TestAppContext) {
#[gpui::test]
async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
- log::info!("Initializing test");
init_test(cx);
cx.executor().allow_parking();
+ async fn await_or_timeout<T>(
+ what: &'static str,
+ seconds: u64,
+ future: impl std::future::Future<Output = T>,
+ ) -> T {
+ use futures::FutureExt as _;
+ use gpui::Timer;
+
+ let timeout = Timer::after(std::time::Duration::from_secs(seconds));
+
+ futures::select! {
+ output = future.fuse() => output,
+ _ = futures::FutureExt::fuse(timeout) => panic!(
+ "[test_extension_store_with_test_extension] timed out after {seconds}s while {what}"
+ )
+ }
+ }
+
let root_dir = Path::new(env!("CARGO_MANIFEST_DIR"))
.parent()
.unwrap()
@@ -559,9 +576,12 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
let extensions_dir = extensions_tree.path().canonicalize().unwrap();
let project_dir = project_dir.path().canonicalize().unwrap();
- log::info!("Setting up test");
-
- let project = Project::test(fs.clone(), [project_dir.as_path()], cx).await;
+ let project = await_or_timeout(
+ "awaiting Project::test",
+ 5,
+ Project::test(fs.clone(), [project_dir.as_path()], cx),
+ )
+ .await;
let proxy = Arc::new(ExtensionHostProxy::new());
let theme_registry = Arc::new(ThemeRegistry::new(Box::new(())));
@@ -679,8 +699,6 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
)
});
- log::info!("Flushing events");
-
// Ensure that debounces fire.
let mut events = cx.events(&extension_store);
let executor = cx.executor();
@@ -701,12 +719,15 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
.detach();
});
- extension_store
- .update(cx, |store, cx| {
+ await_or_timeout(
+ "awaiting install_dev_extension",
+ 60,
+ extension_store.update(cx, |store, cx| {
store.install_dev_extension(test_extension_dir.clone(), cx)
- })
- .await
- .unwrap();
+ }),
+ )
+ .await
+ .unwrap();
let mut fake_servers = language_registry.register_fake_lsp_server(
LanguageServerName("gleam".into()),
@@ -716,15 +737,23 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
},
None,
);
+ cx.executor().run_until_parked();
- let (buffer, _handle) = project
- .update(cx, |project, cx| {
+ let (buffer, _handle) = await_or_timeout(
+ "awaiting open_local_buffer_with_lsp",
+ 5,
+ project.update(cx, |project, cx| {
project.open_local_buffer_with_lsp(project_dir.join("test.gleam"), cx)
- })
+ }),
+ )
+ .await
+ .unwrap();
+ cx.executor().run_until_parked();
+
+ let fake_server = await_or_timeout("awaiting first fake server spawn", 10, fake_servers.next())
.await
.unwrap();
- let fake_server = fake_servers.next().await.unwrap();
let work_dir = extensions_dir.join(format!("work/{test_extension_id}"));
let expected_server_path = work_dir.join("gleam-v1.2.3/gleam");
let expected_binary_contents = language_server_version.lock().binary_contents.clone();
@@ -738,16 +767,30 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
assert_eq!(fake_server.binary.path, expected_server_path);
assert_eq!(fake_server.binary.arguments, [OsString::from("lsp")]);
assert_eq!(
- fs.load(&expected_server_path).await.unwrap(),
+ await_or_timeout(
+ "awaiting fs.load(expected_server_path)",
+ 5,
+ fs.load(&expected_server_path)
+ )
+ .await
+ .unwrap(),
expected_binary_contents
);
assert_eq!(language_server_version.lock().http_request_count, 2);
assert_eq!(
[
- status_updates.next().await.unwrap(),
- status_updates.next().await.unwrap(),
- status_updates.next().await.unwrap(),
- status_updates.next().await.unwrap(),
+ await_or_timeout("awaiting status_updates #1", 5, status_updates.next())
+ .await
+ .unwrap(),
+ await_or_timeout("awaiting status_updates #2", 5, status_updates.next())
+ .await
+ .unwrap(),
+ await_or_timeout("awaiting status_updates #3", 5, status_updates.next())
+ .await
+ .unwrap(),
+ await_or_timeout("awaiting status_updates #4", 5, status_updates.next())
+ .await
+ .unwrap(),
],
[
(
@@ -796,16 +839,30 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
])))
});
- let completion_labels = project
- .update(cx, |project, cx| {
+ // `register_fake_lsp_server` can yield a server instance before the client has finished the LSP
+ // initialization handshake. Wait until we observe the client's `initialized` notification before
+ // issuing requests like completion.
+ await_or_timeout("awaiting LSP Initialized notification", 5, async {
+ fake_server
+ .clone()
+ .try_receive_notification::<lsp::notification::Initialized>()
+ .await;
+ })
+ .await;
+
+ let completion_labels = await_or_timeout(
+ "awaiting completions",
+ 5,
+ project.update(cx, |project, cx| {
project.completions(&buffer, 0, DEFAULT_COMPLETION_CONTEXT, cx)
- })
- .await
- .unwrap()
- .into_iter()
- .flat_map(|response| response.completions)
- .map(|c| c.label.text)
- .collect::<Vec<_>>();
+ }),
+ )
+ .await
+ .unwrap()
+ .into_iter()
+ .flat_map(|response| response.completions)
+ .map(|c| c.label.text)
+ .collect::<Vec<_>>();
assert_eq!(
completion_labels,
[
@@ -829,40 +886,68 @@ async fn test_extension_store_with_test_extension(cx: &mut TestAppContext) {
// The extension has cached the binary path, and does not attempt
// to reinstall it.
- let fake_server = fake_servers.next().await.unwrap();
+ let fake_server = await_or_timeout("awaiting second fake server spawn", 5, fake_servers.next())
+ .await
+ .unwrap();
assert_eq!(fake_server.binary.path, expected_server_path);
assert_eq!(
- fs.load(&expected_server_path).await.unwrap(),
+ await_or_timeout(
+ "awaiting fs.load(expected_server_path) after restart",
+ 5,
+ fs.load(&expected_server_path)
+ )
+ .await
+ .unwrap(),
expected_binary_contents
);
assert_eq!(language_server_version.lock().http_request_count, 0);
// Reload the extension, clearing its cache.
// Start a new instance of the language server.
- extension_store
- .update(cx, |store, cx| {
+ await_or_timeout(
+ "awaiting extension_store.reload(test-extension)",
+ 5,
+ extension_store.update(cx, |store, cx| {
store.reload(Some("test-extension".into()), cx)
- })
- .await;
+ }),
+ )
+ .await;
cx.executor().run_until_parked();
project.update(cx, |project, cx| {
project.restart_language_servers_for_buffers(vec![buffer.clone()], HashSet::default(), cx)
});
// The extension re-fetches the latest version of the language server.
- let fake_server = fake_servers.next().await.unwrap();
+ let fake_server = await_or_timeout("awaiting third fake server spawn", 5, fake_servers.next())
+ .await
+ .unwrap();
let new_expected_server_path =
extensions_dir.join(format!("work/{test_extension_id}/gleam-v2.0.0/gleam"));
let expected_binary_contents = language_server_version.lock().binary_contents.clone();
assert_eq!(fake_server.binary.path, new_expected_server_path);
assert_eq!(fake_server.binary.arguments, [OsString::from("lsp")]);
assert_eq!(
- fs.load(&new_expected_server_path).await.unwrap(),
+ await_or_timeout(
+ "awaiting fs.load(new_expected_server_path)",
+ 5,
+ fs.load(&new_expected_server_path)
+ )
+ .await
+ .unwrap(),
expected_binary_contents
);
// The old language server directory has been cleaned up.
- assert!(fs.metadata(&expected_server_path).await.unwrap().is_none());
+ assert!(
+ await_or_timeout(
+ "awaiting fs.metadata(expected_server_path)",
+ 5,
+ fs.metadata(&expected_server_path)
+ )
+ .await
+ .unwrap()
+ .is_none()
+ );
}
fn init_test(cx: &mut TestAppContext) {
@@ -14,21 +14,15 @@ use git::{
UnmergedStatus,
},
};
-use gpui::{AsyncApp, BackgroundExecutor, SharedString, Task, TaskLabel};
+use gpui::{AsyncApp, BackgroundExecutor, SharedString, Task};
use ignore::gitignore::GitignoreBuilder;
use parking_lot::Mutex;
use rope::Rope;
use smol::future::FutureExt as _;
-use std::{
- path::PathBuf,
- sync::{Arc, LazyLock},
-};
+use std::{path::PathBuf, sync::Arc};
use text::LineEnding;
use util::{paths::PathStyle, rel_path::RelPath};
-pub static LOAD_INDEX_TEXT_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
-pub static LOAD_HEAD_TEXT_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
-
#[derive(Clone)]
pub struct FakeGitRepository {
pub(crate) fs: Arc<FakeFs>,
@@ -104,9 +98,7 @@ impl GitRepository for FakeGitRepository {
.context("not present in index")
.cloned()
});
- self.executor
- .spawn_labeled(*LOAD_INDEX_TEXT_TASK, async move { fut.await.ok() })
- .boxed()
+ self.executor.spawn(async move { fut.await.ok() }).boxed()
}
fn load_committed_text(&self, path: RepoPath) -> BoxFuture<'_, Option<String>> {
@@ -117,9 +109,7 @@ impl GitRepository for FakeGitRepository {
.context("not present in HEAD")
.cloned()
});
- self.executor
- .spawn_labeled(*LOAD_HEAD_TEXT_TASK, async move { fut.await.ok() })
- .boxed()
+ self.executor.spawn(async move { fut.await.ok() }).boxed()
}
fn load_blob_content(&self, oid: git::Oid) -> BoxFuture<'_, Result<String>> {
@@ -665,7 +655,7 @@ impl GitRepository for FakeGitRepository {
let repository_dir_path = self.repository_dir_path.parent().unwrap().to_path_buf();
async move {
executor.simulate_random_delay().await;
- let oid = git::Oid::random(&mut executor.rng());
+ let oid = git::Oid::random(&mut *executor.rng().lock());
let entry = fs.entry(&repository_dir_path)?;
checkpoints.lock().insert(oid, entry);
Ok(GitRepositoryCheckpoint { commit_sha: oid })
@@ -63,9 +63,6 @@ use smol::io::AsyncReadExt;
#[cfg(any(test, feature = "test-support"))]
use std::ffi::OsStr;
-#[cfg(any(test, feature = "test-support"))]
-pub use fake_git_repo::{LOAD_HEAD_TEXT_TASK, LOAD_INDEX_TEXT_TASK};
-
pub trait Watcher: Send + Sync {
fn add(&self, path: &Path) -> Result<()>;
fn remove(&self, path: &Path) -> Result<()>;
@@ -107,10 +107,12 @@ num_cpus = "1.13"
parking = "2.0.0"
parking_lot.workspace = true
postage.workspace = true
+chrono.workspace = true
profiling.workspace = true
rand.workspace = true
raw-window-handle = "0.6"
refineable.workspace = true
+scheduler.workspace = true
resvg = { version = "0.45.0", default-features = false, features = [
"text",
"system-fonts",
@@ -252,6 +254,7 @@ lyon = { version = "1.0", features = ["extra"] }
pretty_assertions.workspace = true
rand.workspace = true
reqwest_client = { workspace = true, features = ["test-support"] }
+scheduler = { workspace = true, features = ["test-support"] }
unicode-segmentation.workspace = true
util = { workspace = true, features = ["test-support"] }
@@ -36,11 +36,11 @@ pub use visual_test_context::*;
#[cfg(any(feature = "inspector", debug_assertions))]
use crate::InspectorElementRegistry;
use crate::{
- Action, ActionBuildError, ActionRegistry, Any, AnyView, AnyWindowHandle, AppContext, Asset,
- AssetSource, BackgroundExecutor, Bounds, ClipboardItem, CursorStyle, DispatchPhase, DisplayId,
- EventEmitter, FocusHandle, FocusMap, ForegroundExecutor, Global, KeyBinding, KeyContext,
- Keymap, Keystroke, LayoutId, Menu, MenuItem, OwnedMenu, PathPromptOptions, Pixels, Platform,
- PlatformDisplay, PlatformKeyboardLayout, PlatformKeyboardMapper, Point, Priority,
+ Action, ActionBuildError, ActionRegistry, Any, AnyView, AnyWindowHandle, AppContext, Arena,
+ Asset, AssetSource, BackgroundExecutor, Bounds, ClipboardItem, CursorStyle, DispatchPhase,
+ DisplayId, EventEmitter, FocusHandle, FocusMap, ForegroundExecutor, Global, KeyBinding,
+ KeyContext, Keymap, Keystroke, LayoutId, Menu, MenuItem, OwnedMenu, PathPromptOptions, Pixels,
+ Platform, PlatformDisplay, PlatformKeyboardLayout, PlatformKeyboardMapper, Point, Priority,
PromptBuilder, PromptButton, PromptHandle, PromptLevel, Render, RenderImage,
RenderablePromptHandle, Reservation, ScreenCaptureSource, SharedString, SubscriberSet,
Subscription, SvgRenderer, Task, TextRenderingMode, TextSystem, Window, WindowAppearance,
@@ -138,10 +138,8 @@ impl Application {
#[cfg(any(test, feature = "test-support"))]
log::info!("GPUI was compiled in test mode");
- let liveness = Arc::new(());
Self(App::new_app(
- current_platform(false, Arc::downgrade(&liveness)),
- liveness,
+ current_platform(false),
Arc::new(()),
Arc::new(NullHttpClient),
))
@@ -151,10 +149,8 @@ impl Application {
/// but makes it possible to run an application in an context like
/// SSH, where GUI applications are not allowed.
pub fn headless() -> Self {
- let liveness = Arc::new(());
Self(App::new_app(
- current_platform(true, Arc::downgrade(&liveness)),
- liveness,
+ current_platform(true),
Arc::new(()),
Arc::new(NullHttpClient),
))
@@ -588,7 +584,6 @@ impl GpuiMode {
/// You need a reference to an `App` to access the state of a [Entity].
pub struct App {
pub(crate) this: Weak<AppCell>,
- pub(crate) _liveness: Arc<()>,
pub(crate) platform: Rc<dyn Platform>,
pub(crate) mode: GpuiMode,
text_system: Arc<TextSystem>,
@@ -644,13 +639,15 @@ pub struct App {
pub(crate) text_rendering_mode: Rc<Cell<TextRenderingMode>>,
quit_mode: QuitMode,
quitting: bool,
+ /// Per-App element arena. This isolates element allocations between different
+ /// App instances (important for tests where multiple Apps run concurrently).
+ pub(crate) element_arena: RefCell<Arena>,
}
impl App {
#[allow(clippy::new_ret_no_self)]
pub(crate) fn new_app(
platform: Rc<dyn Platform>,
- liveness: Arc<()>,
asset_source: Arc<dyn AssetSource>,
http_client: Arc<dyn HttpClient>,
) -> Rc<AppCell> {
@@ -669,7 +666,6 @@ impl App {
let app = Rc::new_cyclic(|this| AppCell {
app: RefCell::new(App {
this: this.clone(),
- _liveness: liveness,
platform: platform.clone(),
text_system,
text_rendering_mode: Rc::new(Cell::new(TextRenderingMode::default())),
@@ -723,6 +719,7 @@ impl App {
#[cfg(any(test, feature = "test-support", debug_assertions))]
name: None,
+ element_arena: RefCell::new(Arena::new(1024 * 1024)),
}),
});
@@ -769,7 +766,7 @@ impl App {
let futures = futures::future::join_all(futures);
if self
- .background_executor
+ .foreground_executor
.block_with_timeout(SHUTDOWN_TIMEOUT, futures)
.is_err()
{
@@ -2542,6 +2539,13 @@ impl<'a, T> Drop for GpuiBorrow<'a, T> {
}
}
+impl Drop for App {
+ fn drop(&mut self) {
+ self.foreground_executor.close();
+ self.background_executor.close();
+ }
+}
+
#[cfg(test)]
mod test {
use std::{cell::RefCell, rc::Rc};
@@ -9,7 +9,7 @@ use crate::{
};
use anyhow::{anyhow, bail};
use futures::{Stream, StreamExt, channel::oneshot};
-use rand::{SeedableRng, rngs::StdRng};
+
use std::{
cell::RefCell, future::Future, ops::Deref, path::PathBuf, rc::Rc, sync::Arc, time::Duration,
};
@@ -116,16 +116,14 @@ impl TestAppContext {
/// Creates a new `TestAppContext`. Usually you can rely on `#[gpui::test]` to do this for you.
pub fn build(dispatcher: TestDispatcher, fn_name: Option<&'static str>) -> Self {
let arc_dispatcher = Arc::new(dispatcher.clone());
- let liveness = std::sync::Arc::new(());
let background_executor = BackgroundExecutor::new(arc_dispatcher.clone());
- let foreground_executor =
- ForegroundExecutor::new(arc_dispatcher, Arc::downgrade(&liveness));
+ let foreground_executor = ForegroundExecutor::new(arc_dispatcher);
let platform = TestPlatform::new(background_executor.clone(), foreground_executor.clone());
let asset_source = Arc::new(());
let http_client = http_client::FakeHttpClient::with_404_response();
let text_system = Arc::new(TextSystem::new(platform.text_system()));
- let app = App::new_app(platform.clone(), liveness, asset_source, http_client);
+ let app = App::new_app(platform.clone(), asset_source, http_client);
app.borrow_mut().mode = GpuiMode::test();
Self {
@@ -147,7 +145,7 @@ impl TestAppContext {
/// Create a single TestAppContext, for non-multi-client tests
pub fn single() -> Self {
- let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(0));
+ let dispatcher = TestDispatcher::new(0);
Self::build(dispatcher, None)
}
@@ -656,11 +654,9 @@ impl<V> Entity<V> {
}
}
- cx.borrow().background_executor().start_waiting();
rx.recv()
.await
.expect("view dropped with pending condition");
- cx.borrow().background_executor().finish_waiting();
}
})
.await
@@ -57,12 +57,9 @@ impl VisualTestAppContext {
.and_then(|s| s.parse().ok())
.unwrap_or(0);
- // Create liveness for task cancellation
- let liveness = Arc::new(());
-
// Create a visual test platform that combines real Mac rendering
// with controllable TestDispatcher for deterministic task scheduling
- let platform = Rc::new(VisualTestPlatform::new(seed, Arc::downgrade(&liveness)));
+ let platform = Rc::new(VisualTestPlatform::new(seed));
// Get the dispatcher and executors from the platform
let dispatcher = platform.dispatcher().clone();
@@ -73,7 +70,7 @@ impl VisualTestAppContext {
let http_client = http_client::FakeHttpClient::with_404_response();
- let mut app = App::new_app(platform.clone(), liveness, asset_source, http_client);
+ let mut app = App::new_app(platform.clone(), asset_source, http_client);
app.borrow_mut().mode = GpuiMode::test();
Self {
@@ -32,9 +32,9 @@
//! your own custom layout algorithm or rendering a code editor.
use crate::{
- App, ArenaBox, AvailableSpace, Bounds, Context, DispatchNodeId, ELEMENT_ARENA, ElementId,
- FocusHandle, InspectorElementId, LayoutId, Pixels, Point, Size, Style, Window,
- util::FluentBuilder,
+ App, ArenaBox, AvailableSpace, Bounds, Context, DispatchNodeId, ElementId, FocusHandle,
+ InspectorElementId, LayoutId, Pixels, Point, Size, Style, Window, util::FluentBuilder,
+ window::with_element_arena,
};
use derive_more::{Deref, DerefMut};
use std::{
@@ -579,8 +579,7 @@ impl AnyElement {
E: 'static + Element,
E::RequestLayoutState: Any,
{
- let element = ELEMENT_ARENA
- .with_borrow_mut(|arena| arena.alloc(|| Drawable::new(element)))
+ let element = with_element_arena(|arena| arena.alloc(|| Drawable::new(element)))
.map(|element| element as &mut dyn ElementObject);
AnyElement(element)
}
@@ -1,99 +1,38 @@
-use crate::{App, PlatformDispatcher, RunnableMeta, RunnableVariant, TaskTiming, profiler};
-use async_task::Runnable;
+use crate::{App, PlatformDispatcher, PlatformScheduler};
use futures::channel::mpsc;
-use parking_lot::{Condvar, Mutex};
+use scheduler::Scheduler;
use smol::prelude::*;
use std::{
fmt::Debug,
+ future::Future,
marker::PhantomData,
- mem::{self, ManuallyDrop},
- num::NonZeroUsize,
- panic::Location,
+ mem,
pin::Pin,
rc::Rc,
- sync::{
- Arc,
- atomic::{AtomicUsize, Ordering},
- },
- task::{Context, Poll},
- thread::{self, ThreadId},
+ sync::Arc,
time::{Duration, Instant},
};
-use util::TryFutureExt as _;
-use waker_fn::waker_fn;
+use util::TryFutureExt;
-#[cfg(any(test, feature = "test-support"))]
-use rand::rngs::StdRng;
+pub use scheduler::{FallibleTask, Priority};
/// A pointer to the executor that is currently running,
/// for spawning background tasks.
#[derive(Clone)]
pub struct BackgroundExecutor {
- #[doc(hidden)]
- pub dispatcher: Arc<dyn PlatformDispatcher>,
+ inner: scheduler::BackgroundExecutor,
+ dispatcher: Arc<dyn PlatformDispatcher>,
}
/// A pointer to the executor that is currently running,
/// for spawning tasks on the main thread.
-///
-/// This is intentionally `!Send` via the `not_send` marker field. This is because
-/// `ForegroundExecutor::spawn` does not require `Send` but checks at runtime that the future is
-/// only polled from the same thread it was spawned from. These checks would fail when spawning
-/// foreground tasks from background threads.
#[derive(Clone)]
pub struct ForegroundExecutor {
- #[doc(hidden)]
- pub dispatcher: Arc<dyn PlatformDispatcher>,
- liveness: std::sync::Weak<()>,
+ inner: scheduler::ForegroundExecutor,
+ dispatcher: Arc<dyn PlatformDispatcher>,
not_send: PhantomData<Rc<()>>,
}
-/// Realtime task priority
-#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
-#[repr(u8)]
-pub enum RealtimePriority {
- /// Audio task
- Audio,
- /// Other realtime task
- #[default]
- Other,
-}
-
-/// Task priority
-#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
-#[repr(u8)]
-pub enum Priority {
- /// Realtime priority
- ///
- /// Spawning a task with this priority will spin it off on a separate thread dedicated just to that task.
- Realtime(RealtimePriority),
- /// High priority
- ///
- /// Only use for tasks that are critical to the user experience / responsiveness of the editor.
- High,
- /// Medium priority, probably suits most of your use cases.
- #[default]
- Medium,
- /// Low priority
- ///
- /// Prioritize this for background work that can come in large quantities
- /// to not starve the executor of resources for high priority tasks
- Low,
-}
-
-impl Priority {
- #[allow(dead_code)]
- pub(crate) const fn probability(&self) -> u32 {
- match self {
- // realtime priorities are not considered for probability scheduling
- Priority::Realtime(_) => 0,
- Priority::High => 60,
- Priority::Medium => 30,
- Priority::Low => 10,
- }
- }
-}
-
/// Task is a primitive that allows work to happen in the background.
///
/// It implements [`Future`] so you can `.await` on it.
@@ -102,63 +41,57 @@ impl Priority {
/// the task to continue running, but with no way to return a value.
#[must_use]
#[derive(Debug)]
-pub struct Task<T>(TaskState<T>);
-
-#[derive(Debug)]
-enum TaskState<T> {
- /// A task that is ready to return a value
- Ready(Option<T>),
-
- /// A task that is currently running.
- Spawned(async_task::Task<T, RunnableMeta>),
-}
+pub struct Task<T>(scheduler::Task<T>);
impl<T> Task<T> {
- /// Creates a new task that will resolve with the value
+ /// Creates a new task that will resolve with the value.
pub fn ready(val: T) -> Self {
- Task(TaskState::Ready(Some(val)))
+ Task(scheduler::Task::ready(val))
}
- /// Detaching a task runs it to completion in the background
+ /// Returns true if the task has completed or was created with `Task::ready`.
+ pub fn is_ready(&self) -> bool {
+ self.0.is_ready()
+ }
+
+ /// Detaching a task runs it to completion in the background.
pub fn detach(self) {
- match self {
- Task(TaskState::Ready(_)) => {}
- Task(TaskState::Spawned(task)) => task.detach(),
- }
+ self.0.detach()
+ }
+
+ /// Wraps a scheduler::Task.
+ pub fn from_scheduler(task: scheduler::Task<T>) -> Self {
+ Task(task)
}
/// Converts this task into a fallible task that returns `Option<T>`.
///
/// Unlike the standard `Task<T>`, a [`FallibleTask`] will return `None`
- /// if the app was dropped while the task is executing.
+ /// if the task was cancelled.
///
/// # Example
///
/// ```ignore
- /// // Background task that gracefully handles app shutdown:
+ /// // Background task that gracefully handles cancellation:
/// cx.background_spawn(async move {
/// let result = foreground_task.fallible().await;
/// if let Some(value) = result {
/// // Process the value
/// }
- /// // If None, app was shut down - just exit gracefully
+ /// // If None, task was cancelled - just exit gracefully
/// }).detach();
/// ```
pub fn fallible(self) -> FallibleTask<T> {
- FallibleTask(match self.0 {
- TaskState::Ready(val) => FallibleTaskState::Ready(val),
- TaskState::Spawned(task) => FallibleTaskState::Spawned(task.fallible()),
- })
+ self.0.fallible()
}
}
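// A minimal sketch (not from the diff) of the wrapper API above: `gpui::Task` now
// delegates to `scheduler::Task`, with `is_ready` and `from_scheduler` as the only additions.
fn ready_task_example() {
    let task: gpui::Task<u32> = gpui::Task::ready(42);
    assert!(task.is_ready()); // tasks created with `ready` report completion immediately
    task.detach(); // detaching a ready task is a no-op
}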
-impl<E, T> Task<Result<T, E>>
+impl<T, E> Task<Result<T, E>>
where
T: 'static,
E: 'static + Debug,
{
- /// Run the task to completion in the background and log any
- /// errors that occur.
+ /// Run the task to completion in the background and log any errors that occur.
#[track_caller]
pub fn detach_and_log_err(self, cx: &App) {
let location = core::panic::Location::caller();
@@ -168,102 +101,42 @@ where
}
}
-impl<T> Future for Task<T> {
+impl<T> std::future::Future for Task<T> {
type Output = T;
- fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
- match unsafe { self.get_unchecked_mut() } {
- Task(TaskState::Ready(val)) => Poll::Ready(val.take().unwrap()),
- Task(TaskState::Spawned(task)) => task.poll(cx),
- }
- }
-}
-
-/// A task that returns `Option<T>` instead of panicking when cancelled.
-#[must_use]
-pub struct FallibleTask<T>(FallibleTaskState<T>);
-
-enum FallibleTaskState<T> {
- /// A task that is ready to return a value
- Ready(Option<T>),
-
- /// A task that is currently running (wraps async_task::FallibleTask).
- Spawned(async_task::FallibleTask<T, RunnableMeta>),
-}
-
-impl<T> FallibleTask<T> {
- /// Creates a new fallible task that will resolve with the value.
- pub fn ready(val: T) -> Self {
- FallibleTask(FallibleTaskState::Ready(Some(val)))
- }
-
- /// Detaching a task runs it to completion in the background.
- pub fn detach(self) {
- match self.0 {
- FallibleTaskState::Ready(_) => {}
- FallibleTaskState::Spawned(task) => task.detach(),
- }
+ fn poll(
+ self: std::pin::Pin<&mut Self>,
+ cx: &mut std::task::Context<'_>,
+ ) -> std::task::Poll<Self::Output> {
+ // SAFETY: Task is a repr(transparent) wrapper around scheduler::Task,
+ // and we're just projecting the pin through to the inner task.
+ let inner = unsafe { self.map_unchecked_mut(|t| &mut t.0) };
+ inner.poll(cx)
}
}
-impl<T> Future for FallibleTask<T> {
- type Output = Option<T>;
+impl BackgroundExecutor {
+ /// Creates a new BackgroundExecutor from the given PlatformDispatcher.
+ pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
+ #[cfg(any(test, feature = "test-support"))]
+ let scheduler: Arc<dyn Scheduler> = if let Some(test_dispatcher) = dispatcher.as_test() {
+ test_dispatcher.scheduler().clone()
+ } else {
+ Arc::new(PlatformScheduler::new(dispatcher.clone()))
+ };
- fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
- match unsafe { self.get_unchecked_mut() } {
- FallibleTask(FallibleTaskState::Ready(val)) => Poll::Ready(val.take()),
- FallibleTask(FallibleTaskState::Spawned(task)) => Pin::new(task).poll(cx),
- }
- }
-}
+ #[cfg(not(any(test, feature = "test-support")))]
+ let scheduler: Arc<dyn Scheduler> = Arc::new(PlatformScheduler::new(dispatcher.clone()));
-impl<T> std::fmt::Debug for FallibleTask<T> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match &self.0 {
- FallibleTaskState::Ready(_) => f.debug_tuple("FallibleTask::Ready").finish(),
- FallibleTaskState::Spawned(task) => {
- f.debug_tuple("FallibleTask::Spawned").field(task).finish()
- }
+ Self {
+ inner: scheduler::BackgroundExecutor::new(scheduler),
+ dispatcher,
}
}
-}
-
-/// A task label is an opaque identifier that you can use to
-/// refer to a task in tests.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub struct TaskLabel(NonZeroUsize);
-impl Default for TaskLabel {
- fn default() -> Self {
- Self::new()
- }
-}
-
-impl TaskLabel {
- /// Construct a new task label.
- pub fn new() -> Self {
- static NEXT_TASK_LABEL: AtomicUsize = AtomicUsize::new(1);
- Self(
- NEXT_TASK_LABEL
- .fetch_add(1, Ordering::SeqCst)
- .try_into()
- .unwrap(),
- )
- }
-}
-
-type AnyLocalFuture<R> = Pin<Box<dyn 'static + Future<Output = R>>>;
-
-type AnyFuture<R> = Pin<Box<dyn 'static + Send + Future<Output = R>>>;
-
-/// BackgroundExecutor lets you run things on background threads.
-/// In production this is a thread pool with no ordering guarantees.
-/// In tests this is simulated by running tasks one by one in a deterministic
-/// (but arbitrary) order controlled by the `SEED` environment variable.
-impl BackgroundExecutor {
- #[doc(hidden)]
- pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
- Self { dispatcher }
+ /// Close this executor. Tasks will not run after this is called.
+ pub fn close(&self) {
+ self.inner.close();
}
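// Hedged sketch (assumed semantics, not from the diff): once `close()` is called, the
// platform dispatchers skip runnables whose metadata reports `is_closed()`, so a spawned
// task may never complete. `fallible()` lets callers observe that as `None` rather than a panic.
async fn close_example(background: gpui::BackgroundExecutor) {
    let task = background.spawn(async { 2 + 2 }).fallible();
    background.close();
    // Depending on whether the task already ran, this is `Some(4)` or `None`.
    let _result: Option<i32> = task.await;
}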
/// Enqueues the given future to be run to completion on a background thread.
@@ -275,7 +148,15 @@ impl BackgroundExecutor {
self.spawn_with_priority(Priority::default(), future)
}
- /// Enqueues the given future to be run to completion on a background thread.
+ /// Enqueues the given future to be run to completion on a background thread with the given priority.
+ ///
+ /// `Priority::Realtime` is currently treated as `Priority::High`.
+ ///
+    /// This is a deliberate simplification rather than a hidden downgrade: realtime execution is effectively
+ /// disabled until we have an in-tree use case and are confident about the semantics and
+ /// failure modes (especially around channel backpressure and the risk of blocking
+ /// latency-sensitive threads). It should be straightforward to add a true realtime
+ /// implementation back once those constraints are well-defined.
#[track_caller]
pub fn spawn_with_priority<R>(
&self,
@@ -285,7 +166,7 @@ impl BackgroundExecutor {
where
R: Send + 'static,
{
- self.spawn_internal::<R>(Box::pin(future), None, priority)
+ Task::from_scheduler(self.inner.spawn_with_priority(priority, future))
}
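// Hedged sketch: spawning background work at an explicit priority. Per the doc comment
// above, realtime priorities currently fall back to high priority.
fn sum_in_background(background: &gpui::BackgroundExecutor) -> gpui::Task<u64> {
    background.spawn_with_priority(gpui::Priority::Low, async {
        // bulk work that should not starve higher-priority tasks
        (0..1_000u64).sum::<u64>()
    })
}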
/// Enqueues the given future to be run to completion on a background thread and blocking the current task on it.
@@ -296,8 +177,10 @@ impl BackgroundExecutor {
where
R: Send,
{
- // We need to ensure that cancellation of the parent task does not drop the environment
- // before the our own task has completed or got cancelled.
+ use crate::RunnableMeta;
+ use parking_lot::{Condvar, Mutex};
+ use std::sync::{Arc, atomic::AtomicBool};
+
struct NotifyOnDrop<'a>(&'a (Condvar, Mutex<bool>));
impl Drop for NotifyOnDrop<'_> {
@@ -320,27 +203,21 @@ impl BackgroundExecutor {
let dispatcher = self.dispatcher.clone();
let location = core::panic::Location::caller();
+ let closed = Arc::new(AtomicBool::new(false));
let pair = &(Condvar::new(), Mutex::new(false));
let _wait_guard = WaitOnDrop(pair);
let (runnable, task) = unsafe {
async_task::Builder::new()
- .metadata(RunnableMeta {
- location,
- app: None,
- })
+ .metadata(RunnableMeta { location, closed })
.spawn_unchecked(
move |_| async {
let _notify_guard = NotifyOnDrop(pair);
future.await
},
move |runnable| {
- dispatcher.dispatch(
- RunnableVariant::Meta(runnable),
- None,
- Priority::default(),
- )
+ dispatcher.dispatch(runnable, Priority::default());
},
)
};
@@ -348,279 +225,6 @@ impl BackgroundExecutor {
task.await
}
- /// Enqueues the given future to be run to completion on a background thread.
- /// The given label can be used to control the priority of the task in tests.
- #[track_caller]
- pub fn spawn_labeled<R>(
- &self,
- label: TaskLabel,
- future: impl Future<Output = R> + Send + 'static,
- ) -> Task<R>
- where
- R: Send + 'static,
- {
- self.spawn_internal::<R>(Box::pin(future), Some(label), Priority::default())
- }
-
- #[track_caller]
- fn spawn_internal<R: Send + 'static>(
- &self,
- future: AnyFuture<R>,
- label: Option<TaskLabel>,
- priority: Priority,
- ) -> Task<R> {
- let dispatcher = self.dispatcher.clone();
- let (runnable, task) = if let Priority::Realtime(realtime) = priority {
- let location = core::panic::Location::caller();
- let (mut tx, rx) = flume::bounded::<Runnable<RunnableMeta>>(1);
-
- dispatcher.spawn_realtime(
- realtime,
- Box::new(move || {
- while let Ok(runnable) = rx.recv() {
- let start = Instant::now();
- let location = runnable.metadata().location;
- let mut timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
-
- let end = Instant::now();
- timing.end = Some(end);
- profiler::add_task_timing(timing);
- }
- }),
- );
-
- async_task::Builder::new()
- .metadata(RunnableMeta {
- location,
- app: None,
- })
- .spawn(
- move |_| future,
- move |runnable| {
- let _ = tx.send(runnable);
- },
- )
- } else {
- let location = core::panic::Location::caller();
- async_task::Builder::new()
- .metadata(RunnableMeta {
- location,
- app: None,
- })
- .spawn(
- move |_| future,
- move |runnable| {
- dispatcher.dispatch(RunnableVariant::Meta(runnable), label, priority)
- },
- )
- };
-
- runnable.schedule();
- Task(TaskState::Spawned(task))
- }
-
- /// Used by the test harness to run an async test in a synchronous fashion.
- #[cfg(any(test, feature = "test-support"))]
- #[track_caller]
- pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
- if let Ok(value) = self.block_internal(false, future, None) {
- value
- } else {
- unreachable!()
- }
- }
-
- /// Block the current thread until the given future resolves.
- /// Consider using `block_with_timeout` instead.
- pub fn block<R>(&self, future: impl Future<Output = R>) -> R {
- if let Ok(value) = self.block_internal(true, future, None) {
- value
- } else {
- unreachable!()
- }
- }
-
- #[cfg(not(any(test, feature = "test-support")))]
- pub(crate) fn block_internal<Fut: Future>(
- &self,
- _background_only: bool,
- future: Fut,
- timeout: Option<Duration>,
- ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
- use std::time::Instant;
-
- let mut future = Box::pin(future);
- if timeout == Some(Duration::ZERO) {
- return Err(future);
- }
- let deadline = timeout.map(|timeout| Instant::now() + timeout);
-
- let parker = parking::Parker::new();
- let unparker = parker.unparker();
- let waker = waker_fn(move || {
- unparker.unpark();
- });
- let mut cx = std::task::Context::from_waker(&waker);
-
- loop {
- match future.as_mut().poll(&mut cx) {
- Poll::Ready(result) => return Ok(result),
- Poll::Pending => {
- let timeout =
- deadline.map(|deadline| deadline.saturating_duration_since(Instant::now()));
- if let Some(timeout) = timeout {
- if !parker.park_timeout(timeout)
- && deadline.is_some_and(|deadline| deadline < Instant::now())
- {
- return Err(future);
- }
- } else {
- parker.park();
- }
- }
- }
- }
- }
-
- #[cfg(any(test, feature = "test-support"))]
- #[track_caller]
- pub(crate) fn block_internal<Fut: Future>(
- &self,
- background_only: bool,
- future: Fut,
- timeout: Option<Duration>,
- ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
- use std::sync::atomic::AtomicBool;
- use std::time::Instant;
-
- use parking::Parker;
-
- let mut future = Box::pin(future);
- if timeout == Some(Duration::ZERO) {
- return Err(future);
- }
-
- // When using a real platform (e.g., MacPlatform for visual tests that need actual
- // Metal rendering), there's no test dispatcher. In this case, we block the thread
- // directly by polling the future and parking until woken. This is required for
- // VisualTestAppContext which uses real platform rendering but still needs blocking
- // behavior for code paths like editor initialization that call block_with_timeout.
- let Some(dispatcher) = self.dispatcher.as_test() else {
- let deadline = timeout.map(|timeout| Instant::now() + timeout);
-
- let parker = Parker::new();
- let unparker = parker.unparker();
- let waker = waker_fn(move || {
- unparker.unpark();
- });
- let mut cx = std::task::Context::from_waker(&waker);
-
- loop {
- match future.as_mut().poll(&mut cx) {
- Poll::Ready(result) => return Ok(result),
- Poll::Pending => {
- let timeout = deadline
- .map(|deadline| deadline.saturating_duration_since(Instant::now()));
- if let Some(timeout) = timeout {
- if !parker.park_timeout(timeout)
- && deadline.is_some_and(|deadline| deadline < Instant::now())
- {
- return Err(future);
- }
- } else {
- parker.park();
- }
- }
- }
- }
- };
-
- let mut max_ticks = if timeout.is_some() {
- dispatcher.gen_block_on_ticks()
- } else {
- usize::MAX
- };
-
- let parker = Parker::new();
- let unparker = parker.unparker();
-
- let awoken = Arc::new(AtomicBool::new(false));
- let waker = waker_fn({
- let awoken = awoken.clone();
- let unparker = unparker.clone();
- move || {
- awoken.store(true, Ordering::SeqCst);
- unparker.unpark();
- }
- });
- let mut cx = std::task::Context::from_waker(&waker);
-
- let duration = Duration::from_secs(
- option_env!("GPUI_TEST_TIMEOUT")
- .and_then(|s| s.parse::<u64>().ok())
- .unwrap_or(180),
- );
- let mut test_should_end_by = Instant::now() + duration;
-
- loop {
- match future.as_mut().poll(&mut cx) {
- Poll::Ready(result) => return Ok(result),
- Poll::Pending => {
- if max_ticks == 0 {
- return Err(future);
- }
- max_ticks -= 1;
-
- if !dispatcher.tick(background_only) {
- if awoken.swap(false, Ordering::SeqCst) {
- continue;
- }
-
- if !dispatcher.parking_allowed() {
- if dispatcher.advance_clock_to_next_delayed() {
- continue;
- }
- let mut backtrace_message = String::new();
- let mut waiting_message = String::new();
- if let Some(backtrace) = dispatcher.waiting_backtrace() {
- backtrace_message =
- format!("\nbacktrace of waiting future:\n{:?}", backtrace);
- }
- if let Some(waiting_hint) = dispatcher.waiting_hint() {
- waiting_message = format!("\n waiting on: {}\n", waiting_hint);
- }
- panic!(
- "parked with nothing left to run{waiting_message}{backtrace_message}",
- )
- }
- dispatcher.push_unparker(unparker.clone());
- parker.park_timeout(Duration::from_millis(1));
- if Instant::now() > test_should_end_by {
- panic!("test timed out after {duration:?} with allow_parking")
- }
- }
- }
- }
- }
- }
-
- /// Block the current thread until the given future resolves
- /// or `duration` has elapsed.
- pub fn block_with_timeout<Fut: Future>(
- &self,
- duration: Duration,
- future: Fut,
- ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
- self.block_internal(true, future, Some(duration))
- }
-
/// Scoped lets you start a number of tasks and waits
/// for all of them to complete before returning.
pub async fn scoped<'scope, F>(&self, scheduler: F)
@@ -660,7 +264,7 @@ impl BackgroundExecutor {
/// Calling this instead of `std::time::Instant::now` allows the use
/// of fake timers in tests.
pub fn now(&self) -> Instant {
- self.dispatcher.now()
+ self.inner.scheduler().clock().now()
}
/// Returns a task that will complete after the given duration.
@@ -670,96 +274,86 @@ impl BackgroundExecutor {
if duration.is_zero() {
return Task::ready(());
}
- let location = core::panic::Location::caller();
- let (runnable, task) = async_task::Builder::new()
- .metadata(RunnableMeta {
- location,
- app: None,
- })
- .spawn(move |_| async move {}, {
- let dispatcher = self.dispatcher.clone();
- move |runnable| dispatcher.dispatch_after(duration, RunnableVariant::Meta(runnable))
- });
- runnable.schedule();
- Task(TaskState::Spawned(task))
- }
-
- /// in tests, start_waiting lets you indicate which task is waiting (for debugging only)
- #[cfg(any(test, feature = "test-support"))]
- pub fn start_waiting(&self) {
- self.dispatcher.as_test().unwrap().start_waiting();
+ self.spawn(self.inner.scheduler().timer(duration))
}
- /// in tests, removes the debugging data added by start_waiting
- #[cfg(any(test, feature = "test-support"))]
- pub fn finish_waiting(&self) {
- self.dispatcher.as_test().unwrap().finish_waiting();
- }
-
- /// in tests, run an arbitrary number of tasks (determined by the SEED environment variable)
+    /// In tests, yield a random number of times (determined by the SEED environment variable) to simulate a scheduling delay.
#[cfg(any(test, feature = "test-support"))]
pub fn simulate_random_delay(&self) -> impl Future<Output = ()> + use<> {
self.dispatcher.as_test().unwrap().simulate_random_delay()
}
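// Hedged sketch: sprinkling seeded random yields into test-only code paths lets the
// deterministic scheduler explore different interleavings across seeds.
async fn with_jitter(background: gpui::BackgroundExecutor) {
    background.simulate_random_delay().await;
    // ... continue with the code under test ...
}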
- /// in tests, indicate that a given task from `spawn_labeled` should run after everything else
- #[cfg(any(test, feature = "test-support"))]
- pub fn deprioritize(&self, task_label: TaskLabel) {
- self.dispatcher.as_test().unwrap().deprioritize(task_label)
- }
-
- /// in tests, move time forward. This does not run any tasks, but does make `timer`s ready.
+ /// In tests, move time forward. This does not run any tasks, but does make `timer`s ready.
#[cfg(any(test, feature = "test-support"))]
pub fn advance_clock(&self, duration: Duration) {
self.dispatcher.as_test().unwrap().advance_clock(duration)
}
- /// in tests, run one task.
+ /// In tests, run one task.
#[cfg(any(test, feature = "test-support"))]
pub fn tick(&self) -> bool {
- self.dispatcher.as_test().unwrap().tick(false)
+ self.dispatcher.as_test().unwrap().scheduler().tick()
}
- /// in tests, run all tasks that are ready to run. If after doing so
- /// the test still has outstanding tasks, this will panic. (See also [`Self::allow_parking`])
+ /// In tests, run tasks until the scheduler would park.
+ ///
+ /// Under the scheduler-backed test dispatcher, `tick()` will not advance the clock, so a pending
+ /// timer can keep `has_pending_tasks()` true even after all currently-runnable tasks have been
+ /// drained. To preserve the historical semantics that tests relied on (drain all work that can
+ /// make progress), we advance the clock to the next timer when no runnable tasks remain.
#[cfg(any(test, feature = "test-support"))]
pub fn run_until_parked(&self) {
- self.dispatcher.as_test().unwrap().run_until_parked()
+ let scheduler = self.dispatcher.as_test().unwrap().scheduler();
+ scheduler.run();
}
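// Hedged sketch of how these test helpers compose (assuming the usual `#[gpui::test]`
// harness): under the fake clock a timer only becomes ready once the clock is advanced,
// after which `run_until_parked` drains the newly runnable work.
#[gpui::test]
async fn timer_fires_after_clock_advance(cx: &mut gpui::TestAppContext) {
    let background = cx.executor();
    let timer = background.timer(std::time::Duration::from_secs(1));
    background.advance_clock(std::time::Duration::from_secs(1)); // makes the timer ready
    background.run_until_parked(); // run everything that just became runnable
    timer.await;
}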
- /// in tests, prevents `run_until_parked` from panicking if there are outstanding tasks.
- /// This is useful when you are integrating other (non-GPUI) futures, like disk access, that
- /// do take real async time to run.
+ /// In tests, prevents `run_until_parked` from panicking if there are outstanding tasks.
#[cfg(any(test, feature = "test-support"))]
pub fn allow_parking(&self) {
- self.dispatcher.as_test().unwrap().allow_parking();
+ self.dispatcher
+ .as_test()
+ .unwrap()
+ .scheduler()
+ .allow_parking();
+
+ if std::env::var("GPUI_RUN_UNTIL_PARKED_LOG").ok().as_deref() == Some("1") {
+ log::warn!("[gpui::executor] allow_parking: enabled");
+ }
}
- /// undoes the effect of [`Self::allow_parking`].
+    /// Sets the range of ticks that `block_with_timeout` will run before timing out.
#[cfg(any(test, feature = "test-support"))]
- pub fn forbid_parking(&self) {
- self.dispatcher.as_test().unwrap().forbid_parking();
+ pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
+ self.dispatcher
+ .as_test()
+ .unwrap()
+ .scheduler()
+ .set_timeout_ticks(range);
}
- /// adds detail to the "parked with nothing let to run" message.
+ /// Undoes the effect of [`Self::allow_parking`].
#[cfg(any(test, feature = "test-support"))]
- pub fn set_waiting_hint(&self, msg: Option<String>) {
- self.dispatcher.as_test().unwrap().set_waiting_hint(msg);
+ pub fn forbid_parking(&self) {
+ self.dispatcher
+ .as_test()
+ .unwrap()
+ .scheduler()
+ .forbid_parking();
}
- /// in tests, returns the rng used by the dispatcher and seeded by the `SEED` environment variable
+ /// In tests, returns the rng used by the dispatcher.
#[cfg(any(test, feature = "test-support"))]
- pub fn rng(&self) -> StdRng {
- self.dispatcher.as_test().unwrap().rng()
+ pub fn rng(&self) -> scheduler::SharedRng {
+ self.dispatcher.as_test().unwrap().scheduler().rng()
}
/// How many CPUs are available to the dispatcher.
pub fn num_cpus(&self) -> usize {
#[cfg(any(test, feature = "test-support"))]
- return 4;
-
- #[cfg(not(any(test, feature = "test-support")))]
- return num_cpus::get();
+ if self.dispatcher.as_test().is_some() {
+ return 4;
+ }
+ num_cpus::get()
}
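// Hedged sketch (assuming the existing `scoped`/`Scope::spawn` API shown in an earlier
// hunk): sizing a fan-out by `num_cpus` and waiting for every worker before returning.
async fn fan_out(background: &gpui::BackgroundExecutor, items: &[u64]) -> u64 {
    let workers = background.num_cpus().max(1);
    let chunk_size = items.len().div_ceil(workers).max(1);
    let mut partial_sums = vec![0u64; items.chunks(chunk_size).count()];
    background
        .scoped(|scope| {
            for (chunk, sum) in items.chunks(chunk_size).zip(partial_sums.iter_mut()) {
                scope.spawn(async move { *sum = chunk.iter().sum() });
            }
        })
        .await;
    partial_sums.into_iter().sum()
}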
/// Whether we're on the main thread.
@@ -767,150 +361,112 @@ impl BackgroundExecutor {
self.dispatcher.is_main_thread()
}
- #[cfg(any(test, feature = "test-support"))]
- /// in tests, control the number of ticks that `block_with_timeout` will run before timing out.
- pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
- self.dispatcher.as_test().unwrap().set_block_on_ticks(range);
+ #[doc(hidden)]
+ pub fn dispatcher(&self) -> &Arc<dyn PlatformDispatcher> {
+ &self.dispatcher
}
}
-/// ForegroundExecutor runs things on the main thread.
impl ForegroundExecutor {
/// Creates a new ForegroundExecutor from the given PlatformDispatcher.
- pub fn new(dispatcher: Arc<dyn PlatformDispatcher>, liveness: std::sync::Weak<()>) -> Self {
+ pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
+ #[cfg(any(test, feature = "test-support"))]
+ let (scheduler, session_id): (Arc<dyn Scheduler>, _) =
+ if let Some(test_dispatcher) = dispatcher.as_test() {
+ (
+ test_dispatcher.scheduler().clone(),
+ test_dispatcher.session_id(),
+ )
+ } else {
+ let platform_scheduler = Arc::new(PlatformScheduler::new(dispatcher.clone()));
+ let session_id = platform_scheduler.allocate_session_id();
+ (platform_scheduler, session_id)
+ };
+
+ #[cfg(not(any(test, feature = "test-support")))]
+ let (scheduler, session_id): (Arc<dyn Scheduler>, _) = {
+ let platform_scheduler = Arc::new(PlatformScheduler::new(dispatcher.clone()));
+ let session_id = platform_scheduler.allocate_session_id();
+ (platform_scheduler, session_id)
+ };
+
+ let inner = scheduler::ForegroundExecutor::new(session_id, scheduler);
+
Self {
+ inner,
dispatcher,
- liveness,
not_send: PhantomData,
}
}
- /// Enqueues the given Task to run on the main thread at some point in the future.
+ /// Close this executor. Tasks will not run after this is called.
+ pub fn close(&self) {
+ self.inner.close();
+ }
+
+ /// Enqueues the given Task to run on the main thread.
#[track_caller]
pub fn spawn<R>(&self, future: impl Future<Output = R> + 'static) -> Task<R>
where
R: 'static,
{
- self.inner_spawn(self.liveness.clone(), Priority::default(), future)
+ Task::from_scheduler(self.inner.spawn(future))
}
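// Hedged sketch: the executor remains `!Send`, so it is only usable from the thread that
// created it, and foreground tasks run on the main thread in the order they were spawned.
fn spawn_in_order(foreground: &gpui::ForegroundExecutor) {
    foreground.spawn(async { log::info!("runs first") }).detach();
    foreground.spawn(async { log::info!("runs second") }).detach();
}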
- /// Enqueues the given Task to run on the main thread at some point in the future.
+ /// Enqueues the given Task to run on the main thread with the given priority.
#[track_caller]
pub fn spawn_with_priority<R>(
&self,
- priority: Priority,
+ _priority: Priority,
future: impl Future<Output = R> + 'static,
) -> Task<R>
where
R: 'static,
{
- self.inner_spawn(self.liveness.clone(), priority, future)
+        // Priority is ignored for foreground tasks; they run in order on the main thread.
+ Task::from_scheduler(self.inner.spawn(future))
}
+ /// Used by the test harness to run an async test in a synchronous fashion.
+ #[cfg(any(test, feature = "test-support"))]
#[track_caller]
- pub(crate) fn inner_spawn<R>(
- &self,
- app: std::sync::Weak<()>,
- priority: Priority,
- future: impl Future<Output = R> + 'static,
- ) -> Task<R>
- where
- R: 'static,
- {
- let dispatcher = self.dispatcher.clone();
- let location = core::panic::Location::caller();
+ pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
+ use std::cell::Cell;
- #[track_caller]
- fn inner<R: 'static>(
- dispatcher: Arc<dyn PlatformDispatcher>,
- future: AnyLocalFuture<R>,
- location: &'static core::panic::Location<'static>,
- app: std::sync::Weak<()>,
- priority: Priority,
- ) -> Task<R> {
- let (runnable, task) = spawn_local_with_source_location(
- future,
- move |runnable| {
- dispatcher.dispatch_on_main_thread(RunnableVariant::Meta(runnable), priority)
- },
- RunnableMeta {
- location,
- app: Some(app),
- },
- );
- runnable.schedule();
- Task(TaskState::Spawned(task))
- }
- inner::<R>(dispatcher, Box::pin(future), location, app, priority)
- }
-}
+ let scheduler = self.inner.scheduler();
-/// Variant of `async_task::spawn_local` that includes the source location of the spawn in panics.
-///
-/// Copy-modified from:
-/// <https://github.com/smol-rs/async-task/blob/ca9dbe1db9c422fd765847fa91306e30a6bb58a9/src/runnable.rs#L405>
-#[track_caller]
-fn spawn_local_with_source_location<Fut, S, M>(
- future: Fut,
- schedule: S,
- metadata: M,
-) -> (Runnable<M>, async_task::Task<Fut::Output, M>)
-where
- Fut: Future + 'static,
- Fut::Output: 'static,
- S: async_task::Schedule<M> + Send + Sync + 'static,
- M: 'static,
-{
- #[inline]
- fn thread_id() -> ThreadId {
- std::thread_local! {
- static ID: ThreadId = thread::current().id();
- }
- ID.try_with(|id| *id)
- .unwrap_or_else(|_| thread::current().id())
- }
+ let output = Cell::new(None);
+ let future = async {
+ output.set(Some(future.await));
+ };
+ let mut future = std::pin::pin!(future);
- struct Checked<F> {
- id: ThreadId,
- inner: ManuallyDrop<F>,
- location: &'static Location<'static>,
- }
+ // In async GPUI tests, we must allow foreground tasks scheduled by the test itself
+ // (which are associated with the test session) to make progress while we block.
+ // Otherwise, awaiting futures that depend on same-session foreground work can deadlock.
+ scheduler.block(None, future.as_mut(), None);
- impl<F> Drop for Checked<F> {
- fn drop(&mut self) {
- assert!(
- self.id == thread_id(),
- "local task dropped by a thread that didn't spawn it. Task spawned at {}",
- self.location
- );
- unsafe { ManuallyDrop::drop(&mut self.inner) };
- }
+ output.take().expect("block_test future did not complete")
}
- impl<F: Future> Future for Checked<F> {
- type Output = F::Output;
+ /// Block the current thread until the given future resolves.
+ /// Consider using `block_with_timeout` instead.
+ pub fn block_on<R>(&self, future: impl Future<Output = R>) -> R {
+ self.inner.block_on(future)
+ }
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- assert!(
- self.id == thread_id(),
- "local task polled by a thread that didn't spawn it. Task spawned at {}",
- self.location
- );
- unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) }
- }
+ /// Block the current thread until the given future resolves or the timeout elapses.
+ pub fn block_with_timeout<R, Fut: Future<Output = R>>(
+ &self,
+ duration: Duration,
+ future: Fut,
+ ) -> Result<R, impl Future<Output = R> + use<R, Fut>> {
+ self.inner.block_with_timeout(duration, future)
}
- // Wrap the future into one that checks which thread it's on.
- let future = Checked {
- id: thread_id(),
- inner: ManuallyDrop::new(future),
- location: Location::caller(),
- };
-
- unsafe {
- async_task::Builder::new()
- .metadata(metadata)
- .spawn_unchecked(move |_| future, schedule)
+ #[doc(hidden)]
+ pub fn dispatcher(&self) -> &Arc<dyn PlatformDispatcher> {
+ &self.dispatcher
}
}
@@ -20,6 +20,8 @@ pub mod colors;
mod element;
mod elements;
mod executor;
+mod platform_scheduler;
+pub(crate) use platform_scheduler::PlatformScheduler;
mod geometry;
mod global;
mod input;
@@ -42,10 +42,9 @@ use crate::{
Action, AnyWindowHandle, App, AsyncWindowContext, BackgroundExecutor, Bounds,
DEFAULT_WINDOW_SIZE, DevicePixels, DispatchEventResult, Font, FontId, FontMetrics, FontRun,
ForegroundExecutor, GlyphId, GpuSpecs, ImageSource, Keymap, LineLayout, Pixels, PlatformInput,
- Point, Priority, RealtimePriority, RenderGlyphParams, RenderImage, RenderImageParams,
- RenderSvgParams, Scene, ShapedGlyph, ShapedRun, SharedString, Size, SvgRenderer,
- SystemWindowTab, Task, TaskLabel, TaskTiming, ThreadTaskTimings, Window, WindowControlArea,
- hash, point, px, size,
+ Point, Priority, RenderGlyphParams, RenderImage, RenderImageParams, RenderSvgParams, Scene,
+ ShapedGlyph, ShapedRun, SharedString, Size, SvgRenderer, SystemWindowTab, Task, TaskTiming,
+ ThreadTaskTimings, Window, WindowControlArea, hash, point, px, size,
};
use anyhow::Result;
use async_task::Runnable;
@@ -55,6 +54,7 @@ use image::RgbaImage;
use image::codecs::gif::GifDecoder;
use image::{AnimationDecoder as _, Frame};
use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
+pub use scheduler::RunnableMeta;
use schemars::JsonSchema;
use seahash::SeaHasher;
use serde::{Deserialize, Serialize};
@@ -98,45 +98,43 @@ pub use visual_test::VisualTestPlatform;
/// Returns a background executor for the current platform.
pub fn background_executor() -> BackgroundExecutor {
- // For standalone background executor, use a dead liveness since there's no App.
- // Weak::new() creates a weak reference that always returns None on upgrade.
- current_platform(true, std::sync::Weak::new()).background_executor()
+ current_platform(true).background_executor()
}
#[cfg(target_os = "macos")]
-pub(crate) fn current_platform(headless: bool, liveness: std::sync::Weak<()>) -> Rc<dyn Platform> {
- Rc::new(MacPlatform::new(headless, liveness))
+pub(crate) fn current_platform(headless: bool) -> Rc<dyn Platform> {
+ Rc::new(MacPlatform::new(headless))
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
-pub(crate) fn current_platform(headless: bool, liveness: std::sync::Weak<()>) -> Rc<dyn Platform> {
+pub(crate) fn current_platform(headless: bool) -> Rc<dyn Platform> {
#[cfg(feature = "x11")]
use anyhow::Context as _;
if headless {
- return Rc::new(HeadlessClient::new(liveness));
+ return Rc::new(HeadlessClient::new());
}
match guess_compositor() {
#[cfg(feature = "wayland")]
- "Wayland" => Rc::new(WaylandClient::new(liveness)),
+ "Wayland" => Rc::new(WaylandClient::new()),
#[cfg(feature = "x11")]
"X11" => Rc::new(
- X11Client::new(liveness)
+ X11Client::new()
.context("Failed to initialize X11 client.")
.unwrap(),
),
- "Headless" => Rc::new(HeadlessClient::new(liveness)),
+ "Headless" => Rc::new(HeadlessClient::new()),
_ => unreachable!(),
}
}
#[cfg(target_os = "windows")]
-pub(crate) fn current_platform(_headless: bool, liveness: std::sync::Weak<()>) -> Rc<dyn Platform> {
+pub(crate) fn current_platform(_headless: bool) -> Rc<dyn Platform> {
Rc::new(
- WindowsPlatform::new(liveness)
+ WindowsPlatform::new()
.inspect_err(|err| show_error("Failed to launch", err.to_string()))
.unwrap(),
)
@@ -592,40 +590,10 @@ pub(crate) trait PlatformWindow: HasWindowHandle + HasDisplayHandle {
}
}
-/// This type is public so that our test macro can generate and use it, but it should not
-/// be considered part of our public API.
+/// Type alias for runnables with metadata.
+/// Previously an enum over metadata-carrying and compat runnables; now simplified to a direct type alias.
#[doc(hidden)]
-pub struct RunnableMeta {
- /// Location of the runnable
- pub location: &'static core::panic::Location<'static>,
- /// Weak reference to check if the app is still alive before running this task
- pub app: Option<std::sync::Weak<()>>,
-}
-
-impl std::fmt::Debug for RunnableMeta {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.debug_struct("RunnableMeta")
- .field("location", &self.location)
- .field("app_alive", &self.is_app_alive())
- .finish()
- }
-}
-
-impl RunnableMeta {
- /// Returns true if the app is still alive (or if no app tracking is configured).
- pub fn is_app_alive(&self) -> bool {
- match &self.app {
- Some(weak) => weak.strong_count() > 0,
- None => true,
- }
- }
-}
-
-#[doc(hidden)]
-pub enum RunnableVariant {
- Meta(Runnable<RunnableMeta>),
- Compat(Runnable),
-}
+pub type RunnableVariant = Runnable<RunnableMeta>;
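// Hedged sketch of the simplified contract a platform dispatcher now implements: a single
// runnable type carrying metadata, checked for closure before it is run (mirroring the
// Linux and macOS changes below).
fn run_runnable(runnable: RunnableVariant) {
    if runnable.metadata().is_closed() {
        return; // the owning executor was closed; drop the task instead of running it
    }
    runnable.run();
}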
/// This type is public so that our test macro can generate and use it, but it should not
/// be considered part of our public API.
@@ -634,10 +602,10 @@ pub trait PlatformDispatcher: Send + Sync {
fn get_all_timings(&self) -> Vec<ThreadTaskTimings>;
fn get_current_thread_timings(&self) -> Vec<TaskTiming>;
fn is_main_thread(&self) -> bool;
- fn dispatch(&self, runnable: RunnableVariant, label: Option<TaskLabel>, priority: Priority);
+ fn dispatch(&self, runnable: RunnableVariant, priority: Priority);
fn dispatch_on_main_thread(&self, runnable: RunnableVariant, priority: Priority);
fn dispatch_after(&self, duration: Duration, runnable: RunnableVariant);
- fn spawn_realtime(&self, priority: RealtimePriority, f: Box<dyn FnOnce() + Send>);
+ fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>);
fn now(&self) -> Instant {
Instant::now()
@@ -13,8 +13,7 @@ use std::{
use crate::{
GLOBAL_THREAD_TIMINGS, PlatformDispatcher, Priority, PriorityQueueReceiver,
- PriorityQueueSender, RealtimePriority, RunnableVariant, THREAD_TIMINGS, TaskLabel, TaskTiming,
- ThreadTaskTimings, profiler,
+ PriorityQueueSender, RunnableVariant, THREAD_TIMINGS, TaskTiming, ThreadTaskTimings, profiler,
};
struct TimerAfter {
@@ -38,47 +37,34 @@ impl LinuxDispatcher {
let thread_count =
std::thread::available_parallelism().map_or(MIN_THREADS, |i| i.get().max(MIN_THREADS));
- // These thread should really be lower prio then the foreground
- // executor
let mut background_threads = (0..thread_count)
.map(|i| {
- let mut receiver = background_receiver.clone();
+ let mut receiver: PriorityQueueReceiver<RunnableVariant> =
+ background_receiver.clone();
std::thread::Builder::new()
.name(format!("Worker-{i}"))
.spawn(move || {
for runnable in receiver.iter() {
+ // Check if the executor that spawned this task was closed
+ if runnable.metadata().is_closed() {
+ continue;
+ }
+
let start = Instant::now();
- let mut location = match runnable {
- RunnableVariant::Meta(runnable) => {
- let location = runnable.metadata().location;
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
- timing
- }
- RunnableVariant::Compat(runnable) => {
- let location = core::panic::Location::caller();
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
- timing
- }
+ let location = runnable.metadata().location;
+ let mut timing = TaskTiming {
+ location,
+ start,
+ end: None,
};
+ profiler::add_task_timing(timing);
+
+ runnable.run();
let end = Instant::now();
- location.end = Some(end);
- profiler::add_task_timing(location);
+ timing.end = Some(end);
+ profiler::add_task_timing(timing);
log::trace!(
"background thread {}: ran runnable. took: {:?}",
@@ -94,7 +80,7 @@ impl LinuxDispatcher {
let (timer_sender, timer_channel) = calloop::channel::channel::<TimerAfter>();
let timer_thread = std::thread::Builder::new()
.name("Timer".to_owned())
- .spawn(|| {
+ .spawn(move || {
let mut event_loop: EventLoop<()> =
EventLoop::try_new().expect("Failed to initialize timer loop!");
@@ -103,39 +89,27 @@ impl LinuxDispatcher {
handle
.insert_source(timer_channel, move |e, _, _| {
if let channel::Event::Msg(timer) = e {
- // This has to be in an option to satisfy the borrow checker. The callback below should only be scheduled once.
let mut runnable = Some(timer.runnable);
timer_handle
.insert_source(
calloop::timer::Timer::from_duration(timer.duration),
move |_, _, _| {
if let Some(runnable) = runnable.take() {
+ // Check if the executor that spawned this task was closed
+ if runnable.metadata().is_closed() {
+ return TimeoutAction::Drop;
+ }
+
let start = Instant::now();
- let mut timing = match runnable {
- RunnableVariant::Meta(runnable) => {
- let location = runnable.metadata().location;
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
- timing
- }
- RunnableVariant::Compat(runnable) => {
- let timing = TaskTiming {
- location: core::panic::Location::caller(),
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
- timing
- }
+ let location = runnable.metadata().location;
+ let mut timing = TaskTiming {
+ location,
+ start,
+ end: None,
};
+ profiler::add_task_timing(timing);
+
+ runnable.run();
let end = Instant::now();
timing.end = Some(end);
@@ -189,7 +163,7 @@ impl PlatformDispatcher for LinuxDispatcher {
thread::current().id() == self.main_thread_id
}
- fn dispatch(&self, runnable: RunnableVariant, _: Option<TaskLabel>, priority: Priority) {
+ fn dispatch(&self, runnable: RunnableVariant, priority: Priority) {
self.background_sender
.send(priority, runnable)
.unwrap_or_else(|_| panic!("blocking sender returned without value"));
@@ -217,19 +191,13 @@ impl PlatformDispatcher for LinuxDispatcher {
.ok();
}
- fn spawn_realtime(&self, priority: RealtimePriority, f: Box<dyn FnOnce() + Send>) {
+ fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
std::thread::spawn(move || {
// SAFETY: always safe to call
let thread_id = unsafe { libc::pthread_self() };
- let policy = match priority {
- RealtimePriority::Audio => libc::SCHED_FIFO,
- RealtimePriority::Other => libc::SCHED_RR,
- };
- let sched_priority = match priority {
- RealtimePriority::Audio => 65,
- RealtimePriority::Other => 45,
- };
+ let policy = libc::SCHED_FIFO;
+ let sched_priority = 65;
// SAFETY: all sched_param members are valid when initialized to zero.
let mut sched_param =
@@ -238,7 +206,7 @@ impl PlatformDispatcher for LinuxDispatcher {
// SAFETY: sched_param is a valid initialized structure
let result = unsafe { libc::pthread_setschedparam(thread_id, policy, &sched_param) };
if result != 0 {
- log::warn!("failed to set realtime thread priority to {:?}", priority);
+ log::warn!("failed to set realtime thread priority");
}
f();
@@ -21,20 +21,17 @@ pub struct HeadlessClientState {
pub(crate) struct HeadlessClient(Rc<RefCell<HeadlessClientState>>);
impl HeadlessClient {
- pub(crate) fn new(liveness: std::sync::Weak<()>) -> Self {
+ pub(crate) fn new() -> Self {
let event_loop = EventLoop::try_new().unwrap();
- let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal(), liveness);
+ let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal());
let handle = event_loop.handle();
handle
.insert_source(main_receiver, |event, _, _: &mut HeadlessClient| {
if let calloop::channel::Event::Msg(runnable) = event {
- match runnable {
- crate::RunnableVariant::Meta(runnable) => runnable.run(),
- crate::RunnableVariant::Compat(runnable) => runnable.run(),
- };
+ runnable.run();
}
})
.ok();
@@ -149,10 +149,7 @@ pub(crate) struct LinuxCommon {
}
impl LinuxCommon {
- pub fn new(
- signal: LoopSignal,
- liveness: std::sync::Weak<()>,
- ) -> (Self, PriorityQueueCalloopReceiver<RunnableVariant>) {
+ pub fn new(signal: LoopSignal) -> (Self, PriorityQueueCalloopReceiver<RunnableVariant>) {
let (main_sender, main_receiver) = PriorityQueueCalloopReceiver::new();
#[cfg(any(feature = "wayland", feature = "x11"))]
@@ -168,7 +165,7 @@ impl LinuxCommon {
let common = LinuxCommon {
background_executor,
- foreground_executor: ForegroundExecutor::new(dispatcher, liveness),
+ foreground_executor: ForegroundExecutor::new(dispatcher),
text_system,
appearance: WindowAppearance::Light,
auto_hide_scrollbars: false,
@@ -81,10 +81,6 @@ use crate::{
PlatformInput, PlatformKeyboardLayout, Point, ResultExt as _, SCROLL_LINES, ScrollDelta,
ScrollWheelEvent, Size, TouchPhase, WindowParams, point, profiler, px, size,
};
-use crate::{
- RunnableVariant, TaskTiming,
- platform::{PlatformWindow, blade::BladeContext},
-};
use crate::{
SharedString,
platform::linux::{
@@ -99,6 +95,10 @@ use crate::{
xdg_desktop_portal::{Event as XDPEvent, XDPEventSource},
},
};
+use crate::{
+ TaskTiming,
+ platform::{PlatformWindow, blade::BladeContext},
+};
/// Used to convert evdev scancode to xkb scancode
const MIN_KEYCODE: u32 = 8;
@@ -453,7 +453,7 @@ fn wl_output_version(version: u32) -> u32 {
}
impl WaylandClient {
- pub(crate) fn new(liveness: std::sync::Weak<()>) -> Self {
+ pub(crate) fn new() -> Self {
let conn = Connection::connect_to_env().unwrap();
let (globals, mut event_queue) =
@@ -490,7 +490,7 @@ impl WaylandClient {
let event_loop = EventLoop::<WaylandClientStatePtr>::try_new().unwrap();
- let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal(), liveness);
+ let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal());
let handle = event_loop.handle();
handle
@@ -500,32 +500,15 @@ impl WaylandClient {
if let calloop::channel::Event::Msg(runnable) = event {
handle.insert_idle(|_| {
let start = Instant::now();
- let mut timing = match runnable {
- RunnableVariant::Meta(runnable) => {
- let location = runnable.metadata().location;
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
- timing
- }
- RunnableVariant::Compat(runnable) => {
- let location = core::panic::Location::caller();
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
- timing
- }
+ let location = runnable.metadata().location;
+ let mut timing = TaskTiming {
+ location,
+ start,
+ end: None,
};
+ profiler::add_task_timing(timing);
+
+ runnable.run();
let end = Instant::now();
timing.end = Some(end);
@@ -1,4 +1,4 @@
-use crate::{Capslock, ResultExt as _, RunnableVariant, TaskTiming, profiler, xcb_flush};
+use crate::{Capslock, ResultExt as _, TaskTiming, profiler, xcb_flush};
use anyhow::{Context as _, anyhow};
use ashpd::WindowIdentifier;
use calloop::{
@@ -297,10 +297,10 @@ impl X11ClientStatePtr {
pub(crate) struct X11Client(Rc<RefCell<X11ClientState>>);
impl X11Client {
- pub(crate) fn new(liveness: std::sync::Weak<()>) -> anyhow::Result<Self> {
+ pub(crate) fn new() -> anyhow::Result<Self> {
let event_loop = EventLoop::try_new()?;
- let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal(), liveness);
+ let (common, main_receiver) = LinuxCommon::new(event_loop.get_signal());
let handle = event_loop.handle();
@@ -314,32 +314,15 @@ impl X11Client {
// callbacks.
handle.insert_idle(|_| {
let start = Instant::now();
- let mut timing = match runnable {
- RunnableVariant::Meta(runnable) => {
- let location = runnable.metadata().location;
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
- timing
- }
- RunnableVariant::Compat(runnable) => {
- let location = core::panic::Location::caller();
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
- timing
- }
+ let location = runnable.metadata().location;
+ let mut timing = TaskTiming {
+ location,
+ start,
+ end: None,
};
+ profiler::add_task_timing(timing);
+
+ runnable.run();
let end = Instant::now();
timing.end = Some(end);
@@ -3,12 +3,9 @@
#![allow(non_snake_case)]
use crate::{
- GLOBAL_THREAD_TIMINGS, PlatformDispatcher, Priority, RealtimePriority, RunnableMeta,
- RunnableVariant, THREAD_TIMINGS, TaskLabel, TaskTiming, ThreadTaskTimings,
+ GLOBAL_THREAD_TIMINGS, PlatformDispatcher, Priority, RunnableMeta, RunnableVariant,
+ THREAD_TIMINGS, TaskTiming, ThreadTaskTimings,
};
-
-use anyhow::Context;
-use async_task::Runnable;
use mach2::{
kern_return::KERN_SUCCESS,
mach_time::mach_timebase_info_data_t,
@@ -19,6 +16,9 @@ use mach2::{
thread_precedence_policy_data_t, thread_time_constraint_policy_data_t,
},
};
+use util::ResultExt;
+
+use async_task::Runnable;
use objc::{
class, msg_send,
runtime::{BOOL, YES},
@@ -26,11 +26,9 @@ use objc::{
};
use std::{
ffi::c_void,
- mem::MaybeUninit,
ptr::{NonNull, addr_of},
time::{Duration, Instant},
};
-use util::ResultExt;
/// All items in the generated file are marked as pub, so we're gonna wrap it in a separate mod to prevent
/// these pub items from leaking into public API.
@@ -45,6 +43,12 @@ pub(crate) fn dispatch_get_main_queue() -> dispatch_queue_t {
pub(crate) struct MacDispatcher;
+impl MacDispatcher {
+ pub fn new() -> Self {
+ Self
+ }
+}
+
impl PlatformDispatcher for MacDispatcher {
fn get_all_timings(&self) -> Vec<ThreadTaskTimings> {
let global_timings = GLOBAL_THREAD_TIMINGS.lock();
@@ -69,20 +73,13 @@ impl PlatformDispatcher for MacDispatcher {
is_main_thread == YES
}
- fn dispatch(&self, runnable: RunnableVariant, _: Option<TaskLabel>, priority: Priority) {
- let (context, trampoline) = match runnable {
- RunnableVariant::Meta(runnable) => (
- runnable.into_raw().as_ptr() as *mut c_void,
- Some(trampoline as unsafe extern "C" fn(*mut c_void)),
- ),
- RunnableVariant::Compat(runnable) => (
- runnable.into_raw().as_ptr() as *mut c_void,
- Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)),
- ),
- };
+ fn dispatch(&self, runnable: RunnableVariant, priority: Priority) {
+ let context = runnable.into_raw().as_ptr() as *mut c_void;
let queue_priority = match priority {
- Priority::Realtime(_) => unreachable!(),
+ Priority::RealtimeAudio => {
+ panic!("RealtimeAudio priority should use spawn_realtime, not dispatch")
+ }
Priority::High => DISPATCH_QUEUE_PRIORITY_HIGH as isize,
Priority::Medium => DISPATCH_QUEUE_PRIORITY_DEFAULT as isize,
Priority::Low => DISPATCH_QUEUE_PRIORITY_LOW as isize,
@@ -92,76 +89,45 @@ impl PlatformDispatcher for MacDispatcher {
dispatch_async_f(
dispatch_get_global_queue(queue_priority, 0),
context,
- trampoline,
+ Some(trampoline as unsafe extern "C" fn(*mut c_void)),
);
}
}
fn dispatch_on_main_thread(&self, runnable: RunnableVariant, _priority: Priority) {
- let (context, trampoline) = match runnable {
- RunnableVariant::Meta(runnable) => (
- runnable.into_raw().as_ptr() as *mut c_void,
- Some(trampoline as unsafe extern "C" fn(*mut c_void)),
- ),
- RunnableVariant::Compat(runnable) => (
- runnable.into_raw().as_ptr() as *mut c_void,
- Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)),
- ),
- };
+ let context = runnable.into_raw().as_ptr() as *mut c_void;
unsafe {
- dispatch_async_f(dispatch_get_main_queue(), context, trampoline);
+ dispatch_async_f(
+ dispatch_get_main_queue(),
+ context,
+ Some(trampoline as unsafe extern "C" fn(*mut c_void)),
+ );
}
}
fn dispatch_after(&self, duration: Duration, runnable: RunnableVariant) {
- let (context, trampoline) = match runnable {
- RunnableVariant::Meta(runnable) => (
- runnable.into_raw().as_ptr() as *mut c_void,
- Some(trampoline as unsafe extern "C" fn(*mut c_void)),
- ),
- RunnableVariant::Compat(runnable) => (
- runnable.into_raw().as_ptr() as *mut c_void,
- Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)),
- ),
- };
+ let context = runnable.into_raw().as_ptr() as *mut c_void;
unsafe {
let queue =
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH.try_into().unwrap(), 0);
let when = dispatch_time(DISPATCH_TIME_NOW as u64, duration.as_nanos() as i64);
- dispatch_after_f(when, queue, context, trampoline);
+ dispatch_after_f(
+ when,
+ queue,
+ context,
+ Some(trampoline as unsafe extern "C" fn(*mut c_void)),
+ );
}
}
- fn spawn_realtime(&self, priority: RealtimePriority, f: Box<dyn FnOnce() + Send>) {
+ fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
std::thread::spawn(move || {
- match priority {
- RealtimePriority::Audio => set_audio_thread_priority(),
- RealtimePriority::Other => set_high_thread_priority(),
- }
- .context(format!("for priority {:?}", priority))
- .log_err();
-
+ set_audio_thread_priority().log_err();
f();
});
}
}
-fn set_high_thread_priority() -> anyhow::Result<()> {
- // SAFETY: always safe to call
- let thread_id = unsafe { libc::pthread_self() };
-
- // SAFETY: all sched_param members are valid when initialized to zero.
- let mut sched_param = unsafe { MaybeUninit::<libc::sched_param>::zeroed().assume_init() };
- sched_param.sched_priority = 45;
-
- let result = unsafe { libc::pthread_setschedparam(thread_id, libc::SCHED_FIFO, &sched_param) };
- if result != 0 {
- anyhow::bail!("failed to set realtime thread priority")
- }
-
- Ok(())
-}
-
fn set_audio_thread_priority() -> anyhow::Result<()> {
// https://chromium.googlesource.com/chromium/chromium/+/master/base/threading/platform_thread_mac.mm#93
@@ -247,54 +213,18 @@ fn set_audio_thread_priority() -> anyhow::Result<()> {
Ok(())
}
-extern "C" fn trampoline(runnable: *mut c_void) {
- let task =
- unsafe { Runnable::<RunnableMeta>::from_raw(NonNull::new_unchecked(runnable as *mut ())) };
+extern "C" fn trampoline(context: *mut c_void) {
+ let runnable =
+ unsafe { Runnable::<RunnableMeta>::from_raw(NonNull::new_unchecked(context as *mut ())) };
- let metadata = task.metadata();
- let location = metadata.location;
+ let metadata = runnable.metadata();
- if !metadata.is_app_alive() {
- drop(task);
+ // Check if the executor that spawned this task was closed
+ if metadata.is_closed() {
return;
}
- let start = Instant::now();
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
-
- THREAD_TIMINGS.with(|timings| {
- let mut timings = timings.lock();
- let timings = &mut timings.timings;
- if let Some(last_timing) = timings.iter_mut().rev().next() {
- if last_timing.location == timing.location {
- return;
- }
- }
-
- timings.push_back(timing);
- });
-
- task.run();
- let end = Instant::now();
-
- THREAD_TIMINGS.with(|timings| {
- let mut timings = timings.lock();
- let timings = &mut timings.timings;
- let Some(last_timing) = timings.iter_mut().rev().next() else {
- return;
- };
- last_timing.end = Some(end);
- });
-}
-
-extern "C" fn trampoline_compat(runnable: *mut c_void) {
- let task = unsafe { Runnable::<()>::from_raw(NonNull::new_unchecked(runnable as *mut ())) };
-
- let location = core::panic::Location::caller();
+ let location = metadata.location;
let start = Instant::now();
let timing = TaskTiming {
@@ -302,6 +232,7 @@ extern "C" fn trampoline_compat(runnable: *mut c_void) {
start,
end: None,
};
+
THREAD_TIMINGS.with(|timings| {
let mut timings = timings.lock();
let timings = &mut timings.timings;
@@ -314,7 +245,7 @@ extern "C" fn trampoline_compat(runnable: *mut c_void) {
timings.push_back(timing);
});
- task.run();
+ runnable.run();
let end = Instant::now();
THREAD_TIMINGS.with(|timings| {
@@ -174,8 +174,8 @@ pub(crate) struct MacPlatformState {
}
impl MacPlatform {
- pub(crate) fn new(headless: bool, liveness: std::sync::Weak<()>) -> Self {
- let dispatcher = Arc::new(MacDispatcher);
+ pub(crate) fn new(headless: bool) -> Self {
+ let dispatcher = Arc::new(MacDispatcher::new());
#[cfg(feature = "font-kit")]
let text_system = Arc::new(crate::MacTextSystem::new());
@@ -190,7 +190,7 @@ impl MacPlatform {
headless,
text_system,
background_executor: BackgroundExecutor::new(dispatcher.clone()),
- foreground_executor: ForegroundExecutor::new(dispatcher, liveness),
+ foreground_executor: ForegroundExecutor::new(dispatcher),
renderer_context: renderer::Context::default(),
general_pasteboard: Pasteboard::general(),
find_pasteboard: Pasteboard::find(),
@@ -1,275 +1,78 @@
-use crate::{PlatformDispatcher, Priority, RunnableVariant, TaskLabel};
-use backtrace::Backtrace;
-use collections::{HashMap, HashSet, VecDeque};
-use parking::Unparker;
-use parking_lot::Mutex;
-use rand::prelude::*;
+use crate::{PlatformDispatcher, Priority, RunnableVariant};
+use scheduler::{Clock, Scheduler, SessionId, TestScheduler, TestSchedulerConfig, Yield};
use std::{
- future::Future,
- ops::RangeInclusive,
- pin::Pin,
sync::Arc,
- task::{Context, Poll},
time::{Duration, Instant},
};
-use util::post_inc;
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-struct TestDispatcherId(usize);
+/// TestDispatcher provides deterministic async execution for tests.
+///
+/// This implementation delegates task scheduling to the scheduler crate's `TestScheduler`.
+/// Access the scheduler directly via `scheduler()` for clock, rng, and parking control.
#[doc(hidden)]
pub struct TestDispatcher {
- id: TestDispatcherId,
- state: Arc<Mutex<TestDispatcherState>>,
-}
-
-struct TestDispatcherState {
- random: StdRng,
- foreground: HashMap<TestDispatcherId, VecDeque<RunnableVariant>>,
- background: Vec<RunnableVariant>,
- deprioritized_background: Vec<RunnableVariant>,
- delayed: Vec<(Duration, RunnableVariant)>,
- start_time: Instant,
- time: Duration,
- is_main_thread: bool,
- next_id: TestDispatcherId,
- allow_parking: bool,
- waiting_hint: Option<String>,
- waiting_backtrace: Option<Backtrace>,
- deprioritized_task_labels: HashSet<TaskLabel>,
- block_on_ticks: RangeInclusive<usize>,
- unparkers: Vec<Unparker>,
+ session_id: SessionId,
+ scheduler: Arc<TestScheduler>,
}
impl TestDispatcher {
- pub fn new(random: StdRng) -> Self {
- let state = TestDispatcherState {
- random,
- foreground: HashMap::default(),
- background: Vec::new(),
- deprioritized_background: Vec::new(),
- delayed: Vec::new(),
- time: Duration::ZERO,
- start_time: Instant::now(),
- is_main_thread: true,
- next_id: TestDispatcherId(1),
+ pub fn new(seed: u64) -> Self {
+ let scheduler = Arc::new(TestScheduler::new(TestSchedulerConfig {
+ seed,
+ randomize_order: true,
allow_parking: false,
- waiting_hint: None,
- waiting_backtrace: None,
- deprioritized_task_labels: Default::default(),
- block_on_ticks: 0..=1000,
- unparkers: Default::default(),
- };
+ capture_pending_traces: std::env::var("PENDING_TRACES")
+ .map_or(false, |var| var == "1" || var == "true"),
+ timeout_ticks: 0..=1000,
+ }));
+
+ let session_id = scheduler.allocate_session_id();
TestDispatcher {
- id: TestDispatcherId(0),
- state: Arc::new(Mutex::new(state)),
+ session_id,
+ scheduler,
}
}
- pub fn advance_clock(&self, by: Duration) {
- let new_now = self.state.lock().time + by;
- loop {
- self.run_until_parked();
- let state = self.state.lock();
- let next_due_time = state.delayed.first().map(|(time, _)| *time);
- drop(state);
- if let Some(due_time) = next_due_time
- && due_time <= new_now
- {
- self.state.lock().time = due_time;
- continue;
- }
- break;
- }
- self.state.lock().time = new_now;
+ pub fn scheduler(&self) -> &Arc<TestScheduler> {
+ &self.scheduler
}
- pub fn advance_clock_to_next_delayed(&self) -> bool {
- let next_due_time = self.state.lock().delayed.first().map(|(time, _)| *time);
- if let Some(next_due_time) = next_due_time {
- self.state.lock().time = next_due_time;
- return true;
- }
- false
+ pub fn session_id(&self) -> SessionId {
+ self.session_id
}
- pub fn simulate_random_delay(&self) -> impl 'static + Send + Future<Output = ()> + use<> {
- struct YieldNow {
- pub(crate) count: usize,
- }
-
- impl Future for YieldNow {
- type Output = ();
+ pub fn advance_clock(&self, by: Duration) {
+ self.scheduler.advance_clock(by);
+ }
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
- if self.count > 0 {
- self.count -= 1;
- cx.waker().wake_by_ref();
- Poll::Pending
- } else {
- Poll::Ready(())
- }
- }
- }
+ pub fn advance_clock_to_next_timer(&self) -> bool {
+ self.scheduler.advance_clock_to_next_timer()
+ }
- YieldNow {
- count: self.state.lock().random.random_range(0..10),
- }
+ pub fn simulate_random_delay(&self) -> Yield {
+ self.scheduler.yield_random()
}
pub fn tick(&self, background_only: bool) -> bool {
- let mut state = self.state.lock();
-
- while let Some((deadline, _)) = state.delayed.first() {
- if *deadline > state.time {
- break;
- }
- let (_, runnable) = state.delayed.remove(0);
- state.background.push(runnable);
- }
-
- let foreground_len: usize = if background_only {
- 0
+ if background_only {
+ self.scheduler.tick_background_only()
} else {
- state
- .foreground
- .values()
- .map(|runnables| runnables.len())
- .sum()
- };
- let background_len = state.background.len();
-
- let runnable;
- let main_thread;
- if foreground_len == 0 && background_len == 0 {
- let deprioritized_background_len = state.deprioritized_background.len();
- if deprioritized_background_len == 0 {
- return false;
- }
- let ix = state.random.random_range(0..deprioritized_background_len);
- main_thread = false;
- runnable = state.deprioritized_background.swap_remove(ix);
- } else {
- main_thread = state.random.random_ratio(
- foreground_len as u32,
- (foreground_len + background_len) as u32,
- );
- if main_thread {
- let state = &mut *state;
- runnable = state
- .foreground
- .values_mut()
- .filter(|runnables| !runnables.is_empty())
- .choose(&mut state.random)
- .unwrap()
- .pop_front()
- .unwrap();
- } else {
- let ix = state.random.random_range(0..background_len);
- runnable = state.background.swap_remove(ix);
- };
- };
-
- let was_main_thread = state.is_main_thread;
- state.is_main_thread = main_thread;
- drop(state);
-
- // todo(localcc): add timings to tests
- match runnable {
- RunnableVariant::Meta(runnable) => {
- if !runnable.metadata().is_app_alive() {
- drop(runnable);
- } else {
- runnable.run();
- }
- }
- RunnableVariant::Compat(runnable) => {
- runnable.run();
- }
- };
-
- self.state.lock().is_main_thread = was_main_thread;
-
- true
- }
-
- pub fn deprioritize(&self, task_label: TaskLabel) {
- self.state
- .lock()
- .deprioritized_task_labels
- .insert(task_label);
+ self.scheduler.tick()
+ }
}
pub fn run_until_parked(&self) {
while self.tick(false) {}
}
-
- pub fn parking_allowed(&self) -> bool {
- self.state.lock().allow_parking
- }
-
- pub fn allow_parking(&self) {
- self.state.lock().allow_parking = true
- }
-
- pub fn forbid_parking(&self) {
- self.state.lock().allow_parking = false
- }
-
- pub fn set_waiting_hint(&self, msg: Option<String>) {
- self.state.lock().waiting_hint = msg
- }
-
- pub fn waiting_hint(&self) -> Option<String> {
- self.state.lock().waiting_hint.clone()
- }
-
- pub fn start_waiting(&self) {
- self.state.lock().waiting_backtrace = Some(Backtrace::new_unresolved());
- }
-
- pub fn finish_waiting(&self) {
- self.state.lock().waiting_backtrace.take();
- }
-
- pub fn waiting_backtrace(&self) -> Option<Backtrace> {
- self.state.lock().waiting_backtrace.take().map(|mut b| {
- b.resolve();
- b
- })
- }
-
- pub fn rng(&self) -> StdRng {
- self.state.lock().random.clone()
- }
-
- pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
- self.state.lock().block_on_ticks = range;
- }
-
- pub fn gen_block_on_ticks(&self) -> usize {
- let mut lock = self.state.lock();
- let block_on_ticks = lock.block_on_ticks.clone();
- lock.random.random_range(block_on_ticks)
- }
-
- pub fn unpark_all(&self) {
- self.state.lock().unparkers.retain(|parker| parker.unpark());
- }
-
- pub fn push_unparker(&self, unparker: Unparker) {
- let mut state = self.state.lock();
- state.unparkers.push(unparker);
- }
}
impl Clone for TestDispatcher {
fn clone(&self) -> Self {
- let id = post_inc(&mut self.state.lock().next_id.0);
+ let session_id = self.scheduler.allocate_session_id();
Self {
- id: TestDispatcherId(id),
- state: self.state.clone(),
+ session_id,
+ scheduler: self.scheduler.clone(),
}
}
}
@@ -284,50 +87,35 @@ impl PlatformDispatcher for TestDispatcher {
}
fn is_main_thread(&self) -> bool {
- self.state.lock().is_main_thread
+ self.scheduler.is_main_thread()
}
fn now(&self) -> Instant {
- let state = self.state.lock();
- state.start_time + state.time
+ self.scheduler.clock().now()
}
- fn dispatch(&self, runnable: RunnableVariant, label: Option<TaskLabel>, _priority: Priority) {
- {
- let mut state = self.state.lock();
- if label.is_some_and(|label| state.deprioritized_task_labels.contains(&label)) {
- state.deprioritized_background.push(runnable);
- } else {
- state.background.push(runnable);
- }
- }
- self.unpark_all();
+ fn dispatch(&self, runnable: RunnableVariant, priority: Priority) {
+ self.scheduler
+ .schedule_background_with_priority(runnable, priority);
}
fn dispatch_on_main_thread(&self, runnable: RunnableVariant, _priority: Priority) {
- self.state
- .lock()
- .foreground
- .entry(self.id)
- .or_default()
- .push_back(runnable);
- self.unpark_all();
+ self.scheduler
+ .schedule_foreground(self.session_id, runnable);
}
- fn dispatch_after(&self, duration: std::time::Duration, runnable: RunnableVariant) {
- let mut state = self.state.lock();
- let next_time = state.time + duration;
- let ix = match state.delayed.binary_search_by_key(&next_time, |e| e.0) {
- Ok(ix) | Err(ix) => ix,
- };
- state.delayed.insert(ix, (next_time, runnable));
+ fn dispatch_after(&self, _duration: Duration, _runnable: RunnableVariant) {
+ panic!(
+ "dispatch_after should not be called in tests. \
+ Use BackgroundExecutor::timer() which uses the scheduler's native timer."
+ );
}
fn as_test(&self) -> Option<&TestDispatcher> {
Some(self)
}
- fn spawn_realtime(&self, _priority: crate::RealtimePriority, f: Box<dyn FnOnce() + Send>) {
+ fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
std::thread::spawn(move || {
f();
});
@@ -139,7 +139,6 @@ impl TestPlatform {
.new_path
.pop_front()
.expect("no pending new path prompt");
- self.background_executor().set_waiting_hint(None);
tx.send(Ok(select_path(&path))).ok();
}
@@ -151,7 +150,6 @@ impl TestPlatform {
.multiple_choice
.pop_front()
.expect("no pending multiple choice prompt");
- self.background_executor().set_waiting_hint(None);
let Some(ix) = prompt.answers.iter().position(|a| a == response) else {
panic!(
"PROMPT: {}\n{:?}\n{:?}\nCannot respond with {}",
@@ -186,8 +184,6 @@ impl TestPlatform {
) -> oneshot::Receiver<usize> {
let (tx, rx) = oneshot::channel();
let answers: Vec<String> = answers.iter().map(|s| s.label().to_string()).collect();
- self.background_executor()
- .set_waiting_hint(Some(format!("PROMPT: {:?} {:?}", msg, detail)));
self.prompts
.borrow_mut()
.multiple_choice
@@ -352,8 +348,6 @@ impl Platform for TestPlatform {
_suggested_name: Option<&str>,
) -> oneshot::Receiver<Result<Option<std::path::PathBuf>>> {
let (tx, rx) = oneshot::channel();
- self.background_executor()
- .set_waiting_hint(Some(format!("PROMPT FOR PATH: {:?}", directory)));
self.prompts
.borrow_mut()
.new_path
@@ -16,7 +16,7 @@ use crate::{
use anyhow::Result;
use futures::channel::oneshot;
use parking_lot::Mutex;
-use rand::SeedableRng;
+
use std::{
path::{Path, PathBuf},
rc::Rc,
@@ -39,19 +39,17 @@ pub struct VisualTestPlatform {
}
impl VisualTestPlatform {
- /// Creates a new VisualTestPlatform with the given random seed and liveness tracker.
+ /// Creates a new VisualTestPlatform with the given random seed.
///
/// The seed is used for deterministic random number generation in the TestDispatcher.
- /// The liveness weak reference is used to track when the app is being shut down.
- pub fn new(seed: u64, liveness: std::sync::Weak<()>) -> Self {
- let rng = rand::rngs::StdRng::seed_from_u64(seed);
- let dispatcher = TestDispatcher::new(rng);
+ pub fn new(seed: u64) -> Self {
+ let dispatcher = TestDispatcher::new(seed);
let arc_dispatcher = Arc::new(dispatcher.clone());
let background_executor = BackgroundExecutor::new(arc_dispatcher.clone());
- let foreground_executor = ForegroundExecutor::new(arc_dispatcher, liveness.clone());
+ let foreground_executor = ForegroundExecutor::new(arc_dispatcher);
- let mac_platform = MacPlatform::new(false, liveness);
+ let mac_platform = MacPlatform::new(false);
Self {
dispatcher,
@@ -14,7 +14,7 @@ use windows::{
Foundation::{LPARAM, WPARAM},
System::Threading::{
GetCurrentThread, HIGH_PRIORITY_CLASS, SetPriorityClass, SetThreadPriority,
- THREAD_PRIORITY_HIGHEST, THREAD_PRIORITY_TIME_CRITICAL,
+ THREAD_PRIORITY_TIME_CRITICAL,
},
UI::WindowsAndMessaging::PostMessageW,
},
@@ -22,8 +22,8 @@ use windows::{
use crate::{
GLOBAL_THREAD_TIMINGS, HWND, PlatformDispatcher, Priority, PriorityQueueSender,
- RealtimePriority, RunnableVariant, SafeHwnd, THREAD_TIMINGS, TaskLabel, TaskTiming,
- ThreadTaskTimings, WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, profiler,
+ RunnableVariant, SafeHwnd, THREAD_TIMINGS, TaskTiming, ThreadTaskTimings,
+ WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, profiler,
};
pub(crate) struct WindowsDispatcher {
@@ -56,7 +56,12 @@ impl WindowsDispatcher {
let handler = {
let mut task_wrapper = Some(runnable);
WorkItemHandler::new(move |_| {
- Self::execute_runnable(task_wrapper.take().unwrap());
+ let runnable = task_wrapper.take().unwrap();
+ // Check if the executor that spawned this task was closed
+ if runnable.metadata().is_closed() {
+ return Ok(());
+ }
+ Self::execute_runnable(runnable);
Ok(())
})
};
@@ -68,7 +73,12 @@ impl WindowsDispatcher {
let handler = {
let mut task_wrapper = Some(runnable);
TimerElapsedHandler::new(move |_| {
- Self::execute_runnable(task_wrapper.take().unwrap());
+ let runnable = task_wrapper.take().unwrap();
+ // Check if the executor that spawned this task was closed
+ if runnable.metadata().is_closed() {
+ return Ok(());
+ }
+ Self::execute_runnable(runnable);
Ok(())
})
};
@@ -79,33 +89,15 @@ impl WindowsDispatcher {
pub(crate) fn execute_runnable(runnable: RunnableVariant) {
let start = Instant::now();
- let mut timing = match runnable {
- RunnableVariant::Meta(runnable) => {
- let location = runnable.metadata().location;
- let timing = TaskTiming {
- location,
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
-
- timing
- }
- RunnableVariant::Compat(runnable) => {
- let timing = TaskTiming {
- location: core::panic::Location::caller(),
- start,
- end: None,
- };
- profiler::add_task_timing(timing);
-
- runnable.run();
-
- timing
- }
+ let location = runnable.metadata().location;
+ let mut timing = TaskTiming {
+ location,
+ start,
+ end: None,
};
+ profiler::add_task_timing(timing);
+
+ runnable.run();
let end = Instant::now();
timing.end = Some(end);
@@ -138,18 +130,16 @@ impl PlatformDispatcher for WindowsDispatcher {
current().id() == self.main_thread_id
}
- fn dispatch(&self, runnable: RunnableVariant, label: Option<TaskLabel>, priority: Priority) {
+ fn dispatch(&self, runnable: RunnableVariant, priority: Priority) {
let priority = match priority {
- Priority::Realtime(_) => unreachable!(),
+ Priority::RealtimeAudio => {
+ panic!("RealtimeAudio priority should use spawn_realtime, not dispatch")
+ }
Priority::High => WorkItemPriority::High,
Priority::Medium => WorkItemPriority::Normal,
Priority::Low => WorkItemPriority::Low,
};
self.dispatch_on_threadpool(priority, runnable);
-
- if let Some(label) = label {
- log::debug!("TaskLabel: {label:?}");
- }
}
fn dispatch_on_main_thread(&self, runnable: RunnableVariant, priority: Priority) {
@@ -185,23 +175,18 @@ impl PlatformDispatcher for WindowsDispatcher {
self.dispatch_on_threadpool_after(runnable, duration);
}
- fn spawn_realtime(&self, priority: RealtimePriority, f: Box<dyn FnOnce() + Send>) {
+ fn spawn_realtime(&self, f: Box<dyn FnOnce() + Send>) {
std::thread::spawn(move || {
// SAFETY: always safe to call
let thread_handle = unsafe { GetCurrentThread() };
- let thread_priority = match priority {
- RealtimePriority::Audio => THREAD_PRIORITY_TIME_CRITICAL,
- RealtimePriority::Other => THREAD_PRIORITY_HIGHEST,
- };
-
// SAFETY: thread_handle is a valid handle to a thread
unsafe { SetPriorityClass(thread_handle, HIGH_PRIORITY_CLASS) }
.context("thread priority class")
.log_err();
// SAFETY: thread_handle is a valid handle to a thread
- unsafe { SetThreadPriority(thread_handle, thread_priority) }
+ unsafe { SetThreadPriority(thread_handle, THREAD_PRIORITY_TIME_CRITICAL) }
.context("thread priority")
.log_err();
@@ -93,7 +93,7 @@ impl WindowsPlatformState {
}
impl WindowsPlatform {
- pub(crate) fn new(liveness: std::sync::Weak<()>) -> Result<Self> {
+ pub(crate) fn new() -> Result<Self> {
unsafe {
OleInitialize(None).context("unable to initialize Windows OLE")?;
}
@@ -148,7 +148,7 @@ impl WindowsPlatform {
let disable_direct_composition = std::env::var(DISABLE_DIRECT_COMPOSITION)
.is_ok_and(|value| value == "true" || value == "1");
let background_executor = BackgroundExecutor::new(dispatcher.clone());
- let foreground_executor = ForegroundExecutor::new(dispatcher, liveness);
+ let foreground_executor = ForegroundExecutor::new(dispatcher);
let drop_target_helper: IDropTargetHelper = unsafe {
CoCreateInstance(&CLSID_DragDropHelper, None, CLSCTX_INPROC_SERVER)
@@ -0,0 +1,134 @@
+use crate::{PlatformDispatcher, RunnableMeta};
+use async_task::Runnable;
+use chrono::{DateTime, Utc};
+use futures::channel::oneshot;
+use scheduler::{Clock, Priority, Scheduler, SessionId, TestScheduler, Timer};
+use std::{
+ future::Future,
+ pin::Pin,
+ sync::{
+ Arc,
+ atomic::{AtomicU16, Ordering},
+ },
+ task::{Context, Poll},
+ time::{Duration, Instant},
+};
+use waker_fn::waker_fn;
+
+/// A production implementation of [`Scheduler`] that wraps a [`PlatformDispatcher`].
+///
+/// This allows GPUI to use the scheduler crate's executor types with the platform's
+/// native dispatch mechanisms (e.g., Grand Central Dispatch on macOS).
+pub struct PlatformScheduler {
+ dispatcher: Arc<dyn PlatformDispatcher>,
+ clock: Arc<PlatformClock>,
+ next_session_id: AtomicU16,
+}
+
+impl PlatformScheduler {
+ pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
+ Self {
+ dispatcher: dispatcher.clone(),
+ clock: Arc::new(PlatformClock { dispatcher }),
+ next_session_id: AtomicU16::new(0),
+ }
+ }
+
+ pub fn allocate_session_id(&self) -> SessionId {
+ SessionId::new(self.next_session_id.fetch_add(1, Ordering::SeqCst))
+ }
+}
+
+impl Scheduler for PlatformScheduler {
+ fn block(
+ &self,
+ _session_id: Option<SessionId>,
+ mut future: Pin<&mut dyn Future<Output = ()>>,
+ timeout: Option<Duration>,
+ ) -> bool {
+ let deadline = timeout.map(|t| Instant::now() + t);
+ let parker = parking::Parker::new();
+ let unparker = parker.unparker();
+ let waker = waker_fn(move || {
+ unparker.unpark();
+ });
+ let mut cx = Context::from_waker(&waker);
+
+ loop {
+ match future.as_mut().poll(&mut cx) {
+ Poll::Ready(()) => return true,
+ Poll::Pending => {
+ if let Some(deadline) = deadline {
+ let now = Instant::now();
+ if now >= deadline {
+ return false;
+ }
+ parker.park_timeout(deadline - now);
+ } else {
+ parker.park();
+ }
+ }
+ }
+ }
+ }
+
+ fn schedule_foreground(&self, _session_id: SessionId, runnable: Runnable<RunnableMeta>) {
+ self.dispatcher
+ .dispatch_on_main_thread(runnable, Priority::default());
+ }
+
+ fn schedule_background_with_priority(
+ &self,
+ runnable: Runnable<RunnableMeta>,
+ priority: Priority,
+ ) {
+ self.dispatcher.dispatch(runnable, priority);
+ }
+
+ fn timer(&self, duration: Duration) -> Timer {
+ use std::sync::{Arc, atomic::AtomicBool};
+
+ let (tx, rx) = oneshot::channel();
+ let dispatcher = self.dispatcher.clone();
+
+ // Create a runnable that will send the completion signal
+ let location = std::panic::Location::caller();
+ let closed = Arc::new(AtomicBool::new(false));
+ let (runnable, _task) = async_task::Builder::new()
+ .metadata(RunnableMeta { location, closed })
+ .spawn(
+ move |_| async move {
+ let _ = tx.send(());
+ },
+ move |runnable| {
+ dispatcher.dispatch_after(duration, runnable);
+ },
+ );
+ runnable.schedule();
+
+ Timer::new(rx)
+ }
+
+ fn clock(&self) -> Arc<dyn Clock> {
+ self.clock.clone()
+ }
+
+ fn as_test(&self) -> Option<&TestScheduler> {
+ None
+ }
+}
+
+/// A production clock that uses the platform dispatcher's time.
+struct PlatformClock {
+ dispatcher: Arc<dyn PlatformDispatcher>,
+}
+
+impl Clock for PlatformClock {
+ fn utc_now(&self) -> DateTime<Utc> {
+ Utc::now()
+ }
+
+ fn now(&self) -> Instant {
+ self.dispatcher.now()
+ }
+}
@@ -217,6 +217,7 @@ impl Drop for ThreadTimings {
}
}
+#[allow(dead_code)] // Used by Linux and Windows dispatchers, not macOS
pub(crate) fn add_task_timing(timing: TaskTiming) {
THREAD_TIMINGS.with(|timings| {
let mut timings = timings.lock();
@@ -42,7 +42,9 @@ impl<T> PriorityQueueState<T> {
let mut queues = self.queues.lock();
match priority {
- Priority::Realtime(_) => unreachable!(),
+ Priority::RealtimeAudio => unreachable!(
+ "Realtime audio priority runs on a dedicated thread and is never queued"
+ ),
Priority::High => queues.high_priority.push_back(item),
Priority::Medium => queues.medium_priority.push_back(item),
Priority::Low => queues.low_priority.push_back(item),
@@ -219,29 +221,29 @@ impl<T> PriorityQueueReceiver<T> {
self.state.recv()?
};
- let high = P::High.probability() * !queues.high_priority.is_empty() as u32;
- let medium = P::Medium.probability() * !queues.medium_priority.is_empty() as u32;
- let low = P::Low.probability() * !queues.low_priority.is_empty() as u32;
+ let high = P::High.weight() * !queues.high_priority.is_empty() as u32;
+ let medium = P::Medium.weight() * !queues.medium_priority.is_empty() as u32;
+ let low = P::Low.weight() * !queues.low_priority.is_empty() as u32;
let mut mass = high + medium + low; //%
if !queues.high_priority.is_empty() {
- let flip = self.rand.random_ratio(P::High.probability(), mass);
+ let flip = self.rand.random_ratio(P::High.weight(), mass);
if flip {
return Ok(queues.high_priority.pop_front());
}
- mass -= P::High.probability();
+ mass -= P::High.weight();
}
if !queues.medium_priority.is_empty() {
- let flip = self.rand.random_ratio(P::Medium.probability(), mass);
+ let flip = self.rand.random_ratio(P::Medium.weight(), mass);
if flip {
return Ok(queues.medium_priority.pop_front());
}
- mass -= P::Medium.probability();
+ mass -= P::Medium.weight();
}
if !queues.low_priority.is_empty() {
- let flip = self.rand.random_ratio(P::Low.probability(), mass);
+ let flip = self.rand.random_ratio(P::Low.weight(), mass);
if flip {
return Ok(queues.low_priority.pop_front());
}
@@ -27,7 +27,6 @@
//! ```
use crate::{Entity, Subscription, TestAppContext, TestDispatcher};
use futures::StreamExt as _;
-use rand::prelude::*;
use smol::channel;
use std::{
env,
@@ -54,7 +53,7 @@ pub fn run_test(
eprintln!("seed = {seed}");
}
let result = panic::catch_unwind(|| {
- let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(seed));
+ let dispatcher = TestDispatcher::new(seed);
test_fn(dispatcher, seed);
});
@@ -395,10 +395,9 @@ mod tests {
use crate::{Font, FontFeatures, FontStyle, FontWeight, TestAppContext, TestDispatcher, font};
#[cfg(target_os = "macos")]
use crate::{TextRun, WindowTextSystem, WrapBoundary};
- use rand::prelude::*;
fn build_wrapper() -> LineWrapper {
- let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(0));
+ let dispatcher = TestDispatcher::new(0);
let cx = TestAppContext::build(dispatcher, None);
let id = cx.text_system().resolve_font(&font(".ZedMono"));
LineWrapper::new(id, px(16.), cx.text_system().platform_text_system.clone())
@@ -217,19 +217,77 @@ slotmap::new_key_type! {
}
thread_local! {
+ /// Fallback arena used when no app-specific arena is active.
+ /// In production, each window draw sets CURRENT_ELEMENT_ARENA to the app's arena.
pub(crate) static ELEMENT_ARENA: RefCell<Arena> = RefCell::new(Arena::new(1024 * 1024));
+
+ /// Points to the current App's element arena during draw operations.
+ /// This allows multiple test Apps to have isolated arenas, preventing
+ /// cross-session corruption when the scheduler interleaves their tasks.
+ static CURRENT_ELEMENT_ARENA: Cell<Option<*const RefCell<Arena>>> = const { Cell::new(None) };
+}
+
+/// Allocates an element in the current arena. Uses the app-specific arena if one
+/// is active (during draw), otherwise falls back to the thread-local ELEMENT_ARENA.
+pub(crate) fn with_element_arena<R>(f: impl FnOnce(&mut Arena) -> R) -> R {
+ CURRENT_ELEMENT_ARENA.with(|current| {
+ if let Some(arena_ptr) = current.get() {
+ // SAFETY: The pointer is valid for the duration of the draw operation
+ // that set it, and we're being called during that same draw.
+ let arena_cell = unsafe { &*arena_ptr };
+ f(&mut arena_cell.borrow_mut())
+ } else {
+ ELEMENT_ARENA.with_borrow_mut(f)
+ }
+ })
+}
+
+/// RAII guard that sets CURRENT_ELEMENT_ARENA for the duration of a draw operation.
+/// When dropped, restores the previous arena (supporting nested draws).
+pub(crate) struct ElementArenaScope {
+ previous: Option<*const RefCell<Arena>>,
+}
+
+impl ElementArenaScope {
+ /// Enter a scope where element allocations use the given arena.
+ pub(crate) fn enter(arena: &RefCell<Arena>) -> Self {
+ let previous = CURRENT_ELEMENT_ARENA.with(|current| {
+ let prev = current.get();
+ current.set(Some(arena as *const RefCell<Arena>));
+ prev
+ });
+ Self { previous }
+ }
+}
+
+impl Drop for ElementArenaScope {
+ fn drop(&mut self) {
+ CURRENT_ELEMENT_ARENA.with(|current| {
+ current.set(self.previous);
+ });
+ }
}
/// Returned when the element arena has been used and so must be cleared before the next draw.
#[must_use]
-pub struct ArenaClearNeeded;
+pub struct ArenaClearNeeded {
+ arena: *const RefCell<Arena>,
+}
impl ArenaClearNeeded {
+ /// Create a new ArenaClearNeeded that will clear the given arena.
+ pub(crate) fn new(arena: &RefCell<Arena>) -> Self {
+ Self {
+ arena: arena as *const RefCell<Arena>,
+ }
+ }
+
/// Clear the element arena.
pub fn clear(self) {
- ELEMENT_ARENA.with_borrow_mut(|element_arena| {
- element_arena.clear();
- });
+ // SAFETY: The arena pointer is valid because ArenaClearNeeded is created
+ // at the end of draw() and must be cleared before the next draw.
+ let arena_cell = unsafe { &*self.arena };
+ arena_cell.borrow_mut().clear();
}
}
@@ -2075,6 +2133,10 @@ impl Window {
/// the contents of the new [`Scene`], use [`Self::present`].
#[profiling::function]
pub fn draw(&mut self, cx: &mut App) -> ArenaClearNeeded {
+ // Set up the per-App arena for element allocation during this draw.
+ // This ensures that multiple test Apps have isolated arenas.
+ let _arena_scope = ElementArenaScope::enter(&cx.element_arena);
+
self.invalidate_entities();
cx.entities.clear_accessed();
debug_assert!(self.rendered_entity_stack.is_empty());
@@ -2142,7 +2204,7 @@ impl Window {
self.invalidator.set_phase(DrawPhase::None);
self.needs_present.set(true);
- ArenaClearNeeded
+ ArenaClearNeeded::new(&cx.element_arena)
}
fn record_entities_accessed(&mut self, cx: &mut App) {
@@ -191,9 +191,9 @@ fn generate_test_function(
&[#seeds],
#max_retries,
&mut |dispatcher, _seed| {
- let executor = gpui::BackgroundExecutor::new(std::sync::Arc::new(dispatcher.clone()));
+ let foreground_executor = gpui::ForegroundExecutor::new(std::sync::Arc::new(dispatcher.clone()));
#cx_vars
- executor.block_test(#inner_fn_name(#inner_fn_args));
+ foreground_executor.block_test(#inner_fn_name(#inner_fn_args));
#cx_teardowns
},
#on_failure_fn_name
@@ -30,7 +30,7 @@ use fs::MTime;
use futures::channel::oneshot;
use gpui::{
App, AppContext as _, Context, Entity, EventEmitter, HighlightStyle, SharedString, StyledText,
- Task, TaskLabel, TextStyle,
+ Task, TextStyle,
};
use lsp::{LanguageServerId, NumberOrString};
@@ -53,7 +53,7 @@ use std::{
ops::{Deref, Range},
path::PathBuf,
rc,
- sync::{Arc, LazyLock},
+ sync::Arc,
time::{Duration, Instant},
vec,
};
@@ -76,10 +76,6 @@ pub use {tree_sitter_python, tree_sitter_rust, tree_sitter_typescript};
pub use lsp::DiagnosticSeverity;
-/// A label for the background task spawned by the buffer to compute
-/// a diff against the contents of its file.
-pub static BUFFER_DIFF_TASK: LazyLock<TaskLabel> = LazyLock::new(TaskLabel::new);
-
/// Indicate whether a [`Buffer`] has permissions to edit.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum Capability {
@@ -1892,7 +1888,7 @@ impl Buffer {
if let Some(indent_sizes) = self.compute_autoindents() {
let indent_sizes = cx.background_spawn(indent_sizes);
match cx
- .background_executor()
+ .foreground_executor()
.block_with_timeout(block_budget, indent_sizes)
{
Ok(indent_sizes) => self.apply_autoindents(indent_sizes, cx),
@@ -2151,18 +2147,17 @@ impl Buffer {
pub fn diff(&self, mut new_text: String, cx: &App) -> Task<Diff> {
let old_text = self.as_rope().clone();
let base_version = self.version();
- cx.background_executor()
- .spawn_labeled(*BUFFER_DIFF_TASK, async move {
- let old_text = old_text.to_string();
- let line_ending = LineEnding::detect(&new_text);
- LineEnding::normalize(&mut new_text);
- let edits = text_diff(&old_text, &new_text);
- Diff {
- base_version,
- line_ending,
- edits,
- }
- })
+ cx.background_spawn(async move {
+ let old_text = old_text.to_string();
+ let line_ending = LineEnding::detect(&new_text);
+ LineEnding::normalize(&mut new_text);
+ let edits = text_diff(&old_text, &new_text);
+ Diff {
+ base_version,
+ line_ending,
+ edits,
+ }
+ })
}
/// Spawns a background task that searches the buffer for any whitespace
@@ -2962,8 +2962,8 @@ fn test_serialization(cx: &mut gpui::App) {
let state = buffer1.read(cx).to_proto(cx);
let ops = cx
- .background_executor()
- .block(buffer1.read(cx).serialize_ops(None, cx));
+ .foreground_executor()
+ .block_on(buffer1.read(cx).serialize_ops(None, cx));
let buffer2 = cx.new(|cx| {
let mut buffer =
Buffer::from_proto(ReplicaId::new(1), Capability::ReadWrite, state, None).unwrap();
@@ -3300,8 +3300,8 @@ fn test_random_collaboration(cx: &mut App, mut rng: StdRng) {
let buffer = cx.new(|cx| {
let state = base_buffer.read(cx).to_proto(cx);
let ops = cx
- .background_executor()
- .block(base_buffer.read(cx).serialize_ops(None, cx));
+ .foreground_executor()
+ .block_on(base_buffer.read(cx).serialize_ops(None, cx));
let mut buffer =
Buffer::from_proto(ReplicaId::new(i as u16), Capability::ReadWrite, state, None)
.unwrap();
@@ -3415,8 +3415,8 @@ fn test_random_collaboration(cx: &mut App, mut rng: StdRng) {
50..=59 if replica_ids.len() < max_peers => {
let old_buffer_state = buffer.read(cx).to_proto(cx);
let old_buffer_ops = cx
- .background_executor()
- .block(buffer.read(cx).serialize_ops(None, cx));
+ .foreground_executor()
+ .block_on(buffer.read(cx).serialize_ops(None, cx));
let new_replica_id = (0..=replica_ids.len() as u16)
.map(ReplicaId::new)
.filter(|replica_id| *replica_id != buffer.read(cx).replica_id())
@@ -496,6 +496,11 @@ impl LanguageRegistry {
servers_rx
}
+ #[cfg(any(feature = "test-support", test))]
+ pub fn has_fake_lsp_server(&self, lsp_name: &LanguageServerName) -> bool {
+ self.state.read().fake_server_entries.contains_key(lsp_name)
+ }
+
/// Adds a language to the registry, which can be loaded if needed.
pub fn register_language(
&self,
@@ -1133,10 +1138,9 @@ impl LanguageRegistry {
binary: lsp::LanguageServerBinary,
cx: &mut gpui::AsyncApp,
) -> Option<lsp::LanguageServer> {
- use gpui::AppContext as _;
-
let mut state = self.state.write();
let fake_entry = state.fake_server_entries.get_mut(name)?;
+
let (server, mut fake_server) = lsp::FakeLanguageServer::new(
server_id,
binary,
@@ -1150,17 +1154,9 @@ impl LanguageRegistry {
initializer(&mut fake_server);
}
- let tx = fake_entry.tx.clone();
- cx.background_spawn(async move {
- if fake_server
- .try_receive_notification::<lsp::notification::Initialized>()
- .await
- .is_some()
- {
- tx.unbounded_send(fake_server.clone()).ok();
- }
- })
- .detach();
+ // Emit synchronously so tests can reliably observe server creation even if the LSP startup
+ // task hasn't progressed to initialization yet.
+ fake_entry.tx.unbounded_send(fake_server).ok();
Some(server)
}
@@ -48,18 +48,17 @@ pub struct State {
codestral_api_key_state: Entity<ApiKeyState>,
}
-struct CodestralApiKey(Entity<ApiKeyState>);
-impl Global for CodestralApiKey {}
-
pub fn codestral_api_key(cx: &mut App) -> Entity<ApiKeyState> {
- if cx.has_global::<CodestralApiKey>() {
- cx.global::<CodestralApiKey>().0.clone()
- } else {
- let api_key_state = cx
- .new(|_| ApiKeyState::new(CODESTRAL_API_URL.into(), CODESTRAL_API_KEY_ENV_VAR.clone()));
- cx.set_global(CodestralApiKey(api_key_state.clone()));
- api_key_state
- }
+ // IMPORTANT:
+ // Do not store `Entity<T>` handles in process-wide statics (e.g. `OnceLock`).
+ //
+ // `Entity<T>` is tied to a particular `App`/entity-map context. Caching it globally can
+ // cause panics like "used a entity with the wrong context" when tests (or multiple apps)
+ // create distinct `App` instances in the same process.
+ //
+ // If we want a per-process singleton, store plain data (e.g. env var names) and create
+ // the entity per-App instead.
+ cx.new(|_| ApiKeyState::new(CODESTRAL_API_URL.into(), CODESTRAL_API_KEY_ENV_VAR.clone()))
}
impl State {
@@ -1422,8 +1422,8 @@ mod tests {
// Validate that all models are supported by tiktoken-rs
for model in Model::iter() {
let count = cx
- .executor()
- .block(count_open_ai_tokens(
+ .foreground_executor()
+ .block_on(count_open_ai_tokens(
request.clone(),
model,
&cx.app.borrow(),
@@ -47,17 +47,14 @@ impl LiveKitStream {
);
let (queue_input, queue_output) = rodio::queue::queue(true);
// spawn rtc stream
- let receiver_task = executor.spawn_with_priority(
- gpui::Priority::Realtime(gpui::RealtimePriority::Audio),
- {
- async move {
- while let Some(frame) = stream.next().await {
- let samples = frame_to_samplesbuffer(frame);
- queue_input.append(samples);
- }
+ let receiver_task = executor.spawn_with_priority(gpui::Priority::RealtimeAudio, {
+ async move {
+ while let Some(frame) = stream.next().await {
+ let samples = frame_to_samplesbuffer(frame);
+ queue_input.append(samples);
}
- },
- );
+ }
+ });
LiveKitStream {
_receiver_task: receiver_task,
@@ -1746,13 +1746,11 @@ impl FakeLanguageServer {
T: request::Request,
T::Result: 'static + Send,
{
- self.server.executor.start_waiting();
self.server.request::<T>(params).await
}
/// Attempts [`Self::try_receive_notification`], unwrapping if it has not received the specified type yet.
pub async fn receive_notification<T: notification::Notification>(&mut self) -> T::Params {
- self.server.executor.start_waiting();
self.try_receive_notification::<T>().await.unwrap()
}
@@ -125,7 +125,7 @@ impl ProfilerWindow {
loop {
let data = cx
.foreground_executor()
- .dispatcher
+ .dispatcher()
.get_current_thread_timings();
this.update(cx, |this: &mut ProfilerWindow, cx| {
@@ -78,8 +78,8 @@ fn test_remote(cx: &mut App) {
let guest_buffer = cx.new(|cx| {
let state = host_buffer.read(cx).to_proto(cx);
let ops = cx
- .background_executor()
- .block(host_buffer.read(cx).serialize_ops(None, cx));
+ .foreground_executor()
+ .block_on(host_buffer.read(cx).serialize_ops(None, cx));
let mut buffer =
Buffer::from_proto(ReplicaId::REMOTE_SERVER, Capability::ReadWrite, state, None)
.unwrap();
@@ -1182,7 +1182,7 @@ impl SettingsObserver {
) -> Task<()> {
let mut user_tasks_file_rx =
watch_config_file(cx.background_executor(), fs, file_path.clone());
- let user_tasks_content = cx.background_executor().block(user_tasks_file_rx.next());
+ let user_tasks_content = cx.foreground_executor().block_on(user_tasks_file_rx.next());
let weak_entry = cx.weak_entity();
cx.spawn(async move |settings_observer, cx| {
let Ok(task_store) = settings_observer.read_with(cx, |settings_observer, _| {
@@ -1233,7 +1233,7 @@ impl SettingsObserver {
) -> Task<()> {
let mut user_tasks_file_rx =
watch_config_file(cx.background_executor(), fs, file_path.clone());
- let user_tasks_content = cx.background_executor().block(user_tasks_file_rx.next());
+ let user_tasks_content = cx.foreground_executor().block_on(user_tasks_file_rx.next());
let weak_entry = cx.weak_entity();
cx.spawn(async move |settings_observer, cx| {
let Ok(task_store) = settings_observer.read_with(cx, |settings_observer, _| {
@@ -9,8 +9,7 @@ use crate::{
};
use async_trait::async_trait;
use buffer_diff::{
- BufferDiffEvent, CALCULATE_DIFF_TASK, DiffHunkSecondaryStatus, DiffHunkStatus,
- DiffHunkStatusKind, assert_hunks,
+ BufferDiffEvent, DiffHunkSecondaryStatus, DiffHunkStatus, DiffHunkStatusKind, assert_hunks,
};
use fs::FakeFs;
use futures::{StreamExt, future};
@@ -211,8 +210,8 @@ async fn test_editorconfig_support(cx: &mut gpui::TestAppContext) {
.languages()
.load_language_for_file_path(file.path.as_std_path());
let file_language = cx
- .background_executor()
- .block(file_language)
+ .foreground_executor()
+ .block_on(file_language)
.expect("Failed to get file language");
let file = file as _;
language_settings(Some(file_language.name()), Some(&file), cx).into_owned()
@@ -1462,6 +1461,7 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon
let prev_read_dir_count = fs.read_dir_call_count();
let fake_server = fake_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
let server_id = lsp_store.read_with(cx, |lsp_store, _| {
let (id, _) = lsp_store.language_server_statuses().next().unwrap();
id
@@ -2077,6 +2077,7 @@ async fn test_restarting_server_with_diagnostics_running(cx: &mut gpui::TestAppC
let buffer_id = buffer.read_with(cx, |buffer, _| buffer.remote_id());
// Simulate diagnostics starting to update.
let fake_server = fake_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
fake_server.start_progress(progress_token).await;
// Restart the server before the diagnostics finish updating.
@@ -2087,6 +2088,7 @@ async fn test_restarting_server_with_diagnostics_running(cx: &mut gpui::TestAppC
// Simulate the newly started server sending more diagnostics.
let fake_server = fake_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
assert_eq!(
events.next().await.unwrap(),
Event::LanguageServerRemoved(LanguageServerId(0))
@@ -3353,6 +3355,8 @@ async fn test_definition(cx: &mut gpui::TestAppContext) {
.unwrap();
let fake_server = fake_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
+
fake_server.set_request_handler::<lsp::request::GotoDefinition, _, _>(|params, _| async move {
let params = params.text_document_position_params;
assert_eq!(
@@ -3463,6 +3467,7 @@ async fn test_completions_with_text_edit(cx: &mut gpui::TestAppContext) {
.unwrap();
let fake_server = fake_language_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
// When text_edit exists, it takes precedence over insert_text and label
let text = "let a = obj.fqn";
@@ -3546,6 +3551,7 @@ async fn test_completions_with_edit_ranges(cx: &mut gpui::TestAppContext) {
.unwrap();
let fake_server = fake_language_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
let text = "let a = obj.fqn";
// Test 1: When text_edit is None but text_edit_text exists with default edit_range
@@ -3683,6 +3689,7 @@ async fn test_completions_without_edit_ranges(cx: &mut gpui::TestAppContext) {
.unwrap();
let fake_server = fake_language_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
// Test 1: When text_edit is None but insert_text exists (no edit_range in defaults)
let text = "let a = b.fqn";
@@ -3789,6 +3796,7 @@ async fn test_completions_with_carriage_returns(cx: &mut gpui::TestAppContext) {
.unwrap();
let fake_server = fake_language_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
let text = "let a = b.fqn";
buffer.update(cx, |buffer, cx| buffer.set_text(text, cx));
@@ -3863,6 +3871,7 @@ async fn test_apply_code_actions_with_commands(cx: &mut gpui::TestAppContext) {
.unwrap();
let fake_server = fake_language_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
// Language server returns code actions that contain commands, and not edits.
let actions = project.update(cx, |project, cx| {
@@ -4204,10 +4213,6 @@ async fn test_file_changes_multiple_times_on_disk(cx: &mut gpui::TestAppContext)
.await
.unwrap();
- // Simulate buffer diffs being slow, so that they don't complete before
- // the next file change occurs.
- cx.executor().deprioritize(*language::BUFFER_DIFF_TASK);
-
// Change the buffer's file on disk, and then wait for the file change
// to be detected by the worktree, so that the buffer starts reloading.
fs.save(
@@ -4259,10 +4264,6 @@ async fn test_edit_buffer_while_it_reloads(cx: &mut gpui::TestAppContext) {
.await
.unwrap();
- // Simulate buffer diffs being slow, so that they don't complete before
- // the next file change occurs.
- cx.executor().deprioritize(*language::BUFFER_DIFF_TASK);
-
// Change the buffer's file on disk, and then wait for the file change
// to be detected by the worktree, so that the buffer starts reloading.
fs.save(
@@ -5380,6 +5381,7 @@ async fn test_lsp_rename_notifications(cx: &mut gpui::TestAppContext) {
.unwrap();
let fake_server = fake_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
let response = project.update(cx, |project, cx| {
let worktree = project.worktrees(cx).next().unwrap();
let entry = worktree
@@ -5491,6 +5493,7 @@ async fn test_rename(cx: &mut gpui::TestAppContext) {
.unwrap();
let fake_server = fake_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
let response = project.update(cx, |project, cx| {
project.prepare_rename(buffer.clone(), 7, cx)
@@ -7994,18 +7997,13 @@ async fn test_staging_hunks_with_delayed_fs_event(cx: &mut gpui::TestAppContext)
#[gpui::test(iterations = 25)]
async fn test_staging_random_hunks(
mut rng: StdRng,
- executor: BackgroundExecutor,
+ _executor: BackgroundExecutor,
cx: &mut gpui::TestAppContext,
) {
let operations = env::var("OPERATIONS")
.map(|i| i.parse().expect("invalid `OPERATIONS` variable"))
.unwrap_or(20);
- // Try to induce races between diff recalculation and index writes.
- if rng.random_bool(0.5) {
- executor.deprioritize(*CALCULATE_DIFF_TASK);
- }
-
use DiffHunkSecondaryStatus::*;
init_test(cx);
@@ -62,7 +62,11 @@ use ui::{
ScrollAxes, ScrollableHandle, Scrollbars, StickyCandidate, Tooltip, WithScrollbar, prelude::*,
v_flex,
};
-use util::{ResultExt, TakeUntilExt, TryFutureExt, maybe, paths::compare_paths, rel_path::RelPath};
+use util::{
+ ResultExt, TakeUntilExt, TryFutureExt, maybe,
+ paths::compare_paths,
+ rel_path::{RelPath, RelPathBuf},
+};
use workspace::{
DraggedSelection, OpenInTerminal, OpenOptions, OpenVisible, PreviewTabsSettings, SelectedEntry,
SplitDirection, Workspace,
@@ -3212,13 +3216,14 @@ impl ProjectPanel {
destination: ProjectEntryId,
destination_is_file: bool,
cx: &mut Context<Self>,
- ) {
+ ) -> Option<Task<Result<CreatedEntry>>> {
if self
.project
.read(cx)
.entry_is_worktree_root(entry_to_move, cx)
{
- self.move_worktree_root(entry_to_move, destination, cx)
+ self.move_worktree_root(entry_to_move, destination, cx);
+ None
} else {
self.move_worktree_entry(entry_to_move, destination, destination_is_file, cx)
}
@@ -3253,38 +3258,53 @@ impl ProjectPanel {
destination_entry: ProjectEntryId,
destination_is_file: bool,
cx: &mut Context<Self>,
- ) {
+ ) -> Option<Task<Result<CreatedEntry>>> {
if entry_to_move == destination_entry {
- return;
+ return None;
}
- let destination_worktree = self.project.update(cx, |project, cx| {
- let source_path = project.path_for_entry(entry_to_move, cx)?;
- let destination_path = project.path_for_entry(destination_entry, cx)?;
+ let (destination_worktree, rename_task) = self.project.update(cx, |project, cx| {
+ let Some(source_path) = project.path_for_entry(entry_to_move, cx) else {
+ return (None, None);
+ };
+ let Some(destination_path) = project.path_for_entry(destination_entry, cx) else {
+ return (None, None);
+ };
let destination_worktree_id = destination_path.worktree_id;
- let mut destination_path = destination_path.path.as_ref();
- if destination_is_file {
- destination_path = destination_path.parent()?;
- }
+ let destination_dir = if destination_is_file {
+ destination_path.path.parent().unwrap_or(RelPath::empty())
+ } else {
+ destination_path.path.as_ref()
+ };
+
+ let Some(source_name) = source_path.path.file_name() else {
+ return (None, None);
+ };
+ let Ok(source_name) = RelPath::unix(source_name) else {
+ return (None, None);
+ };
- let mut new_path = destination_path.to_rel_path_buf();
- new_path.push(RelPath::unix(source_path.path.file_name()?).unwrap());
- if new_path.as_rel_path() != source_path.path.as_ref() {
- let task = project.rename_entry(
+ let mut new_path = destination_dir.to_rel_path_buf();
+ new_path.push(source_name);
+ let rename_task = (new_path.as_rel_path() != source_path.path.as_ref()).then(|| {
+ project.rename_entry(
entry_to_move,
(destination_worktree_id, new_path).into(),
cx,
- );
- cx.foreground_executor().spawn(task).detach_and_log_err(cx);
- }
+ )
+ });
- project.worktree_id_for_entry(destination_entry, cx)
+ (
+ project.worktree_id_for_entry(destination_entry, cx),
+ rename_task,
+ )
});
if let Some(destination_worktree) = destination_worktree {
self.expand_entry(destination_worktree, destination_entry, cx);
}
+ rename_task
}
fn index_for_selection(&self, selection: SelectedEntry) -> Option<(usize, usize, usize)> {
@@ -3995,8 +4015,122 @@ impl ProjectPanel {
Some(())
});
} else {
+ let update_marks = !self.marked_entries.is_empty();
+ let active_selection = selections.active_selection;
+
+ // For folded selections, track the leaf suffix relative to the resolved
+ // entry so we can refresh it after the move completes.
+ let (folded_selection_info, folded_selection_entries): (
+ Vec<(ProjectEntryId, RelPathBuf)>,
+ HashSet<SelectedEntry>,
+ ) = {
+ let project = self.project.read(cx);
+ let mut info = Vec::new();
+ let mut folded_entries = HashSet::default();
+
+ for selection in selections.items() {
+ let resolved_id = self.resolve_entry(selection.entry_id);
+ if resolved_id == selection.entry_id {
+ continue;
+ }
+ folded_entries.insert(*selection);
+ let Some(source_path) = project.path_for_entry(resolved_id, cx) else {
+ continue;
+ };
+ let Some(leaf_path) = project.path_for_entry(selection.entry_id, cx) else {
+ continue;
+ };
+ let Ok(suffix) = leaf_path.path.strip_prefix(source_path.path.as_ref()) else {
+ continue;
+ };
+ if suffix.as_unix_str().is_empty() {
+ continue;
+ }
+
+ info.push((resolved_id, suffix.to_rel_path_buf()));
+ }
+ (info, folded_entries)
+ };
+
+ // Collect move tasks paired with their source entry ID so we can correlate
+ // results with folded selections that need refreshing.
+ let mut move_tasks: Vec<(ProjectEntryId, Task<Result<CreatedEntry>>)> = Vec::new();
for entry in entries {
- self.move_entry(entry.entry_id, target_entry_id, is_file, cx);
+ if let Some(task) = self.move_entry(entry.entry_id, target_entry_id, is_file, cx) {
+ move_tasks.push((entry.entry_id, task));
+ }
+ }
+
+ if move_tasks.is_empty() {
+ return;
+ }
+
+ if folded_selection_info.is_empty() {
+ for (_, task) in move_tasks {
+ task.detach_and_log_err(cx);
+ }
+ } else {
+ cx.spawn_in(window, async move |project_panel, cx| {
+ // Await all move tasks and collect successful results
+ let mut move_results: Vec<(ProjectEntryId, Entry)> = Vec::new();
+ for (entry_id, task) in move_tasks {
+ if let Some(CreatedEntry::Included(new_entry)) = task.await.log_err() {
+ move_results.push((entry_id, new_entry));
+ }
+ }
+
+ if move_results.is_empty() {
+ return;
+ }
+
+ // For folded selections, we need to refresh the leaf paths (with suffixes)
+ // because they may not be indexed yet after the parent directory was moved.
+ // First collect the paths to refresh, then refresh them.
+ let paths_to_refresh: Vec<(Entity<Worktree>, Arc<RelPath>)> = project_panel
+ .update(cx, |project_panel, cx| {
+ let project = project_panel.project.read(cx);
+ folded_selection_info
+ .iter()
+ .filter_map(|(resolved_id, suffix)| {
+ let (_, new_entry) =
+ move_results.iter().find(|(id, _)| id == resolved_id)?;
+ let worktree = project.worktree_for_entry(new_entry.id, cx)?;
+ let leaf_path = new_entry.path.join(suffix);
+ Some((worktree, leaf_path))
+ })
+ .collect()
+ })
+ .ok()
+ .unwrap_or_default();
+
+ let refresh_tasks: Vec<_> = paths_to_refresh
+ .into_iter()
+ .filter_map(|(worktree, leaf_path)| {
+ worktree.update(cx, |worktree, cx| {
+ worktree
+ .as_local_mut()
+ .map(|local| local.refresh_entry(leaf_path, None, cx))
+ })
+ })
+ .collect();
+
+ for task in refresh_tasks {
+ task.await.log_err();
+ }
+
+ if update_marks && !folded_selection_entries.is_empty() {
+ project_panel
+ .update(cx, |project_panel, cx| {
+ project_panel.marked_entries.retain(|entry| {
+ !folded_selection_entries.contains(entry)
+ || *entry == active_selection
+ });
+ cx.notify();
+ })
+ .ok();
+ }
+ })
+ .detach();
}
}
}
@@ -63,7 +63,7 @@ impl ProjectSymbolsDelegate {
fn filter(&mut self, query: &str, window: &mut Window, cx: &mut Context<Picker<Self>>) {
const MAX_MATCHES: usize = 100;
- let mut visible_matches = cx.background_executor().block(fuzzy::match_strings(
+ let mut visible_matches = cx.foreground_executor().block_on(fuzzy::match_strings(
&self.visible_match_candidates,
query,
false,
@@ -72,7 +72,7 @@ impl ProjectSymbolsDelegate {
&Default::default(),
cx.background_executor().clone(),
));
- let mut external_matches = cx.background_executor().block(fuzzy::match_strings(
+ let mut external_matches = cx.foreground_executor().block_on(fuzzy::match_strings(
&self.external_match_candidates,
query,
false,
@@ -929,8 +929,8 @@ pub fn handle_settings_file_changes(
settings_changed: impl Fn(Option<anyhow::Error>, &mut App) + 'static,
) {
let server_settings_content = cx
- .background_executor()
- .block(server_settings_file.next())
+ .foreground_executor()
+ .block_on(server_settings_file.next())
.unwrap();
SettingsStore::update_global(cx, |store, cx| {
store
@@ -16,6 +16,7 @@ doctest = false
alacritty_terminal.workspace = true
anyhow.workspace = true
async-dispatcher.workspace = true
+async-task.workspace = true
async-tungstenite = { workspace = true, features = ["tokio", "tokio-rustls-manual-roots", "tokio-runtime"] }
base64.workspace = true
client.workspace = true
@@ -12,7 +12,7 @@ mod session;
use std::{sync::Arc, time::Duration};
use async_dispatcher::{Dispatcher, Runnable, set_dispatcher};
-use gpui::{App, PlatformDispatcher, Priority, RunnableVariant};
+use gpui::{App, PlatformDispatcher, Priority, RunnableMeta};
use project::Fs;
pub use runtimelib::ExecutionState;
@@ -44,18 +44,38 @@ fn zed_dispatcher(cx: &mut App) -> impl Dispatcher {
// just make that consistent so we have this dispatcher ready to go for
// other crates in Zed.
impl Dispatcher for ZedDispatcher {
+ #[track_caller]
fn dispatch(&self, runnable: Runnable) {
- self.dispatcher
- .dispatch(RunnableVariant::Compat(runnable), None, Priority::default());
+ use std::sync::{Arc, atomic::AtomicBool};
+ let location = core::panic::Location::caller();
+ let closed = Arc::new(AtomicBool::new(false));
+ let (wrapper, task) = async_task::Builder::new()
+ .metadata(RunnableMeta { location, closed })
+ .spawn(|_| async move { runnable.run() }, {
+ let dispatcher = self.dispatcher.clone();
+ move |r| dispatcher.dispatch(r, Priority::default())
+ });
+ wrapper.schedule();
+ task.detach();
}
+ #[track_caller]
fn dispatch_after(&self, duration: Duration, runnable: Runnable) {
- self.dispatcher
- .dispatch_after(duration, RunnableVariant::Compat(runnable));
+ use std::sync::{Arc, atomic::AtomicBool};
+ let location = core::panic::Location::caller();
+ let closed = Arc::new(AtomicBool::new(false));
+ let (wrapper, task) = async_task::Builder::new()
+ .metadata(RunnableMeta { location, closed })
+ .spawn(|_| async move { runnable.run() }, {
+ let dispatcher = self.dispatcher.clone();
+ move |r| dispatcher.dispatch_after(duration, r)
+ });
+ wrapper.schedule();
+ task.detach();
}
}
ZedDispatcher {
- dispatcher: cx.background_executor().dispatcher.clone(),
+ dispatcher: cx.background_executor().dispatcher().clone(),
}
}
@@ -0,0 +1,229 @@
+# GPUI Scheduler Integration
+
+This document describes the integration of GPUI's async execution with the scheduler crate, including architecture, design decisions, and lessons learned.
+
+## Goal
+
+Unify GPUI's async execution with the scheduler crate, eliminating duplicate blocking/scheduling logic and enabling deterministic testing.
+
+---
+
+## Architecture
+
+```
+                               GPUI
+┌──────────────────────────┐      ┌──────────────────────────────┐
+│ gpui::Background-        │      │ gpui::ForegroundExecutor     │
+│ Executor                 │      │  - inner: scheduler::        │
+│  - scheduler:            │      │      ForegroundExecutor      │
+│      Arc<dyn Scheduler>  │      │  - dispatcher: Arc           │
+│  - dispatcher: Arc       │      └──────────────┬───────────────┘
+└────────────┬─────────────┘                     │
+             │                                   │
+             │ (creates temporary                │ (wraps)
+             │  scheduler::Background-           │
+             │  Executor when spawning)          │
+             ▼                                   ▼
+┌────────────────────────────────────────────────────────────────┐
+│                       Arc<dyn Scheduler>                       │
+└───────────────────────────────┬────────────────────────────────┘
+                                │
+               ┌────────────────┴────────────────┐
+               ▼                                 ▼
+  ┌─────────────────────────┐        ┌───────────────────────────┐
+  │ PlatformScheduler       │        │ TestScheduler             │
+  │ (production)            │        │ (deterministic tests)     │
+  └─────────────────────────┘        └───────────────────────────┘
+```
+
+### Scheduler Trait
+
+The scheduler crate provides the following; a sketch of the trait surface appears after the list:
+- `Scheduler` trait with `block()`, `schedule_foreground()`, `schedule_background_with_priority()`, `timer()`, `clock()`
+- `TestScheduler` implementation for deterministic testing
+- `ForegroundExecutor` and `BackgroundExecutor` that wrap `Arc<dyn Scheduler>`
+- `Task<T>` type with `ready()`, `is_ready()`, `detach()`, `from_async_task()`
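+
+A hedged sketch of that trait surface, reconstructed from the `PlatformScheduler` implementation shown earlier in this diff. Supertrait bounds are omitted and exact signatures may differ; `crates/scheduler/src/scheduler.rs` is authoritative.
+
+```rust
+use async_task::Runnable;
+use gpui::RunnableMeta;
+use scheduler::{Clock, Priority, SessionId, TestScheduler, Timer};
+use std::{future::Future, pin::Pin, sync::Arc, time::Duration};
+
+// Reconstruction of the trait surface, not the authoritative definition.
+pub trait Scheduler {
+    /// Block the calling thread on `future`, optionally bounded by `timeout`.
+    /// Returns true if the future completed, false if the timeout elapsed first.
+    fn block(
+        &self,
+        session_id: Option<SessionId>,
+        future: Pin<&mut dyn Future<Output = ()>>,
+        timeout: Option<Duration>,
+    ) -> bool;
+
+    /// Queue `runnable` on the main thread, keyed by the session that spawned it.
+    fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable<RunnableMeta>);
+
+    /// Queue `runnable` on a background thread at the given priority.
+    fn schedule_background_with_priority(
+        &self,
+        runnable: Runnable<RunnableMeta>,
+        priority: Priority,
+    );
+
+    /// A timer that resolves after `duration` of (possibly simulated) time.
+    fn timer(&self, duration: Duration) -> Timer;
+
+    /// The clock backing `now()` / `utc_now()`.
+    fn clock(&self) -> Arc<dyn Clock>;
+
+    /// Downcast to the deterministic test scheduler, if this is one.
+    fn as_test(&self) -> Option<&TestScheduler>;
+}
+```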
+
+### PlatformScheduler
+
+`PlatformScheduler` in GPUI (`crates/gpui/src/platform_scheduler.rs`):
+- Implements `Scheduler` trait for production use
+- Wraps `PlatformDispatcher` (Mac, Linux, Windows)
+- Uses `parking::Parker` for blocking operations
+- Uses `dispatch_after` for timers
+- Provides a `PlatformClock` that delegates to the dispatcher
+
+### GPUI Executors
+
+GPUI's executors (`crates/gpui/src/executor.rs`), with a spawn-path sketch after the list:
+- `gpui::ForegroundExecutor` wraps `scheduler::ForegroundExecutor` internally
+- `gpui::BackgroundExecutor` holds `Arc<dyn Scheduler>` directly
+- Select `TestScheduler` or `PlatformScheduler` based on dispatcher type
+- Wrap `scheduler::Task<T>` in a thin `gpui::Task<T>` that adds `detach_and_log_err()`
+- Use `Scheduler::block()` for all blocking operations
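+
+A hedged sketch of what a background spawn looks like under this arrangement, modeled on the `async_task` + `RunnableMeta` pattern used by the REPL dispatcher elsewhere in this diff. The real `BackgroundExecutor::spawn` body may differ, and the import paths and explicit `Send + Sync` bounds are assumptions made to keep the sketch self-contained.
+
+```rust
+use gpui::{Priority, RunnableMeta};
+use scheduler::Scheduler;
+use std::{
+    future::Future,
+    panic::Location,
+    sync::{Arc, atomic::AtomicBool},
+};
+
+#[track_caller]
+fn spawn_background_sketch(
+    scheduler: Arc<dyn Scheduler + Send + Sync>,
+    future: impl Future<Output = ()> + Send + 'static,
+) {
+    // Every runnable carries its spawn location and a "closed" flag; the platform
+    // dispatchers check the flag before running it (see the trampolines earlier in
+    // this diff).
+    let location = Location::caller();
+    let closed = Arc::new(AtomicBool::new(false));
+    let (runnable, task) = async_task::Builder::new()
+        .metadata(RunnableMeta { location, closed })
+        .spawn(move |_| future, {
+            let scheduler = scheduler.clone();
+            move |runnable| {
+                scheduler.schedule_background_with_priority(runnable, Priority::default())
+            }
+        });
+    runnable.schedule();
+    task.detach();
+}
+```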
+
+---
+
+## Design Decisions
+
+### Key Design Principles
+
+1. **No optional fields**: Both test and production paths use the same executor types with different `Scheduler` implementations underneath.
+
+2. **Scheduler owns blocking logic**: The `Scheduler::block()` method handles all blocking, including timeout and task stepping (for tests).
+
+3. **GPUI Task wrapper**: Thin wrapper around `scheduler::Task` that adds `detach_and_log_err()` which requires `&App`.
+
+### Foreground Priority Not Supported
+
+`ForegroundExecutor::spawn_with_priority` accepts a priority parameter but ignores it. This is acceptable because:
+- macOS (primary platform) ignores foreground priority anyway
+- TestScheduler runs foreground tasks in order
+- There are no external callers of this method in the codebase
+
+### Session IDs for Foreground Isolation
+
+Each `ForegroundExecutor` gets a `SessionId` to prevent reentrancy when blocking. This ensures that when blocking on a future, we don't run foreground tasks from the same session.
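+
+A minimal sketch of how sessions are allocated, mirroring the configuration and `allocate_session_id` calls in the `TestDispatcher` shown earlier in this diff:
+
+```rust
+use scheduler::{TestScheduler, TestSchedulerConfig};
+use std::sync::Arc;
+
+fn session_isolation_sketch() {
+    // Mirrors how TestDispatcher::new builds its scheduler earlier in this diff.
+    let scheduler = Arc::new(TestScheduler::new(TestSchedulerConfig {
+        seed: 0,
+        randomize_order: true,
+        allow_parking: false,
+        capture_pending_traces: false,
+        timeout_ticks: 0..=1000,
+    }));
+
+    // Every ForegroundExecutor (and every TestDispatcher clone) gets its own session.
+    let session_a = scheduler.allocate_session_id(); // e.g. the app's main dispatcher
+    let session_b = scheduler.allocate_session_id(); // e.g. a cloned dispatcher
+
+    // Foreground work is queued per session via schedule_foreground(session, runnable).
+    // While session A is blocked inside Scheduler::block(Some(session_a), ..), the
+    // scheduler may step background tasks and session B's foreground queue, but it
+    // will not re-enter foreground tasks queued under session_a.
+    let _ = (session_a, session_b);
+}
+```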
+
+### Runtime Scheduler Selection
+
+In test builds, we check `dispatcher.as_test()` to choose between `TestScheduler` and `PlatformScheduler`. This allows the same executor types to work in both test and production environments.
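+
+A sketch of that selection as it might look inside `crates/gpui/src/executor.rs`; the real constructor may be shaped differently, and `as_test()` may be gated behind the test-support feature.
+
+```rust
+use crate::{PlatformDispatcher, PlatformScheduler};
+use scheduler::Scheduler;
+use std::sync::Arc;
+
+fn scheduler_for(dispatcher: Arc<dyn PlatformDispatcher>) -> Arc<dyn Scheduler> {
+    match dispatcher.as_test() {
+        // Deterministic path: share the TestScheduler owned by the TestDispatcher.
+        Some(test_dispatcher) => test_dispatcher.scheduler().clone(),
+        // Production path: wrap the platform dispatcher.
+        None => Arc::new(PlatformScheduler::new(dispatcher.clone())),
+    }
+}
+```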
+
+### Profiler Integration
+
+The profiler task timing infrastructure continues to work because:
+- `PlatformScheduler::schedule_background_with_priority` calls `dispatcher.dispatch()`
+- `PlatformScheduler::schedule_foreground` calls `dispatcher.dispatch_on_main_thread()`
+- All platform dispatchers wrap task execution with profiler timing
+
+---
+
+## Intentional Removals
+
+### `spawn_labeled` and `deprioritize`
+
+**What was removed**:
+- `BackgroundExecutor::spawn_labeled(label: TaskLabel, future)`
+- `BackgroundExecutor::deprioritize(label: TaskLabel)`
+- `TaskLabel` type
+
+**Why**: These were only used in a few places for test ordering control. The new priority-weighted scheduling in `TestScheduler` provides similar functionality through `Priority::High/Medium/Low`.
+
+**Migration**: Use `spawn()` instead of `spawn_labeled()`. For test ordering, use explicit synchronization (channels, etc.) or priority levels.
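+
+A minimal before/after sketch of the migration, mirroring the buffer-diff change elsewhere in this diff; `compute_diff()` is a hypothetical stand-in for the spawned work.
+
+```rust
+// Before: a labeled spawn so tests could deprioritize this work.
+// (`compute_diff()` is a hypothetical placeholder.)
+let task = cx
+    .background_executor()
+    .spawn_labeled(*BUFFER_DIFF_TASK, async move { compute_diff().await });
+
+// After: a plain background spawn. Ordering in tests is controlled with explicit
+// synchronization (channels) or Priority levels rather than labels.
+let task = cx.background_spawn(async move { compute_diff().await });
+```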
+
+### `start_waiting` / `finish_waiting` Debug Methods
+
+**What was removed**:
+- `BackgroundExecutor::start_waiting()`
+- `BackgroundExecutor::finish_waiting()`
+- Associated `waiting_backtrace` tracking in TestDispatcher
+
+**Why**: The new `TracingWaker` in `TestScheduler` provides better debugging capability. Run tests with `PENDING_TRACES=1` to see backtraces of all pending futures when parking is forbidden.
+
+### Realtime Priority
+
+**What was removed**: the parameterized `Priority::Realtime(RealtimePriority)` variant (`Audio`/`Other`) and the per-priority OS thread configuration behind it. Audio streaming now uses the single `Priority::RealtimeAudio` variant, and `spawn_realtime` no longer takes a priority argument.
+
+**Why**: There were no in-tree call sites for the generic (non-audio) realtime priority, and its correctness/backpressure semantics are non-trivial:
+- Blocking enqueue risks stalling latency-sensitive threads
+- Non-blocking enqueue implies dropping runnables under load, which breaks correctness for general futures
+
+Rather than ship ambiguous or risky semantics, we removed the generic variant until there is a concrete in-tree use case.
+
+---
+
+## Lessons Learned
+
+These lessons were discovered during integration testing and represent important design constraints.
+
+### 1. Never Cache `Entity<T>` in Process-Wide Statics
+
+**Problem**: `gpui::Entity<T>` is a handle tied to a particular `App`'s entity-map. Storing an `Entity<T>` in a process-wide static (`OnceLock`, `LazyLock`, etc.) and reusing it across different `App` instances causes:
+- "used a entity with the wrong context" panics
+- `Option::unwrap()` failures in leak-detection clone paths
+- Nondeterministic behavior depending on test ordering
+
+**Solution**: Cache plain data (env var name, URL, etc.) in statics, and create `Entity<T>` per-`App`.
+
+**Guideline**: Never store `gpui::Entity<T>` or other `App`-context-bound handles in process-wide statics unless explicitly keyed by `App` identity.
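+
+A sketch of the guideline, assuming GPUI's `cx.new` entity constructor; `ServerState`, `server_state`, and the environment variable are hypothetical:
+
+```rust
+use std::sync::OnceLock;
+
+use gpui::{App, Entity};
+
+struct ServerState {
+    url: String,
+}
+
+// Anti-pattern: an Entity handle stored process-wide outlives the App that created it.
+// static SERVER_STATE: OnceLock<Entity<ServerState>> = OnceLock::new();
+
+// Preferred: cache plain data process-wide...
+static SERVER_URL: OnceLock<String> = OnceLock::new();
+
+// ...and create the Entity per App.
+fn server_state(cx: &mut App) -> Entity<ServerState> {
+    let url = SERVER_URL
+        .get_or_init(|| std::env::var("EXAMPLE_SERVER_URL").unwrap_or_default())
+        .clone();
+    cx.new(|_| ServerState { url })
+}
+```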
+
+### 2. `block_with_timeout` Behavior Depends on Tick Budget
+
+**Problem**: In `TestScheduler`, "timeout" behavior depends on an internal tick budget (`timeout_ticks`), not just elapsed wall-clock time. During the allotted ticks, the scheduler can poll futures and step other tasks.
+
+**Implications**:
+- A future can complete "within a timeout" in tests due to scheduler progress, even without explicit `advance_clock()`
+- Yielding does not advance time
+- If a test needs time to advance, it must do so explicitly via `advance_clock()`
+
+**For deterministic timeout tests**: Set `scheduler.set_timeout_ticks(0..=0)` to prevent any scheduler stepping during timeout, then explicitly advance time.
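+
+Condensed from the regression test added later in this diff, the pattern looks like this:
+
+```rust
+TestScheduler::once(async |scheduler| {
+    // Zero timeout ticks: block_with_timeout won't step other tasks or poll extra times.
+    scheduler.set_timeout_ticks(0..=0);
+
+    let task = scheduler.background().spawn({
+        let scheduler = scheduler.clone();
+        async move {
+            scheduler.timer(Duration::from_millis(100)).await;
+            42
+        }
+    });
+
+    // Times out: no ticks are granted and the clock has not moved.
+    let timed_out = scheduler
+        .foreground()
+        .block_with_timeout(Duration::from_millis(50), task);
+    assert!(timed_out.is_err());
+
+    // Time only advances when we advance it; afterwards the returned future completes.
+    let mut task = timed_out.err().unwrap();
+    scheduler.advance_clock(Duration::from_millis(100));
+    scheduler.run();
+    assert_eq!(scheduler.foreground().block_on(&mut task), 42);
+});
+```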
+
+### 3. Realtime Priority Must Panic in Tests
+
+**Problem**: `Priority::Realtime` spawns dedicated OS threads outside the test scheduler, which breaks determinism and causes hangs/flakes.
+
+**Solution**: The test dispatcher's `spawn_realtime` implementation panics with a clear message. This is an enforced invariant, not an implementation detail.
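+
+A sketch of what that looks like in the test dispatcher; the exact signature and message are illustrative, not copied from the diff:
+
+```rust
+fn spawn_realtime(&self, _runnable: RunnableVariant) {
+    // A dedicated OS thread would run outside the deterministic TestScheduler,
+    // so refuse loudly rather than introduce nondeterminism.
+    panic!("RealtimeAudio tasks are not supported under the test dispatcher");
+}
+```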
+
+---
+
+## Test Helpers
+
+Test-only methods on `BackgroundExecutor` (a short usage sketch follows this list):
+- `block_test()` - for running async tests synchronously
+- `advance_clock()` - move simulated time forward
+- `tick()` - run one task
+- `run_until_parked()` - run all ready tasks
+- `allow_parking()` / `forbid_parking()` - control parking behavior
+- `simulate_random_delay()` - yield randomly for fuzzing
+- `rng()` - access seeded RNG
+- `set_block_on_ticks()` - configure timeout tick range for block operations
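+
+A sketch combining a few of these; it assumes the `#[gpui::test]` harness and that `cx.executor()` returns the test `BackgroundExecutor`:
+
+```rust
+#[gpui::test]
+async fn waits_on_simulated_time(cx: &mut gpui::TestAppContext) {
+    let executor = cx.executor();
+
+    let timer = executor.timer(std::time::Duration::from_secs(1));
+    let task = executor.spawn(async move {
+        timer.await;
+        "done"
+    });
+
+    executor.run_until_parked(); // drain ready work; the timer has not fired yet
+    executor.advance_clock(std::time::Duration::from_secs(1)); // fire pending timers
+    assert_eq!(task.await, "done");
+}
+```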
+
+---
+
+## Code Quality Notes
+
+### `dispatch_after` Panics in TestDispatcher
+
+This is intentional:
+```rust
+fn dispatch_after(&self, _duration: Duration, _runnable: RunnableVariant) {
+ panic!(
+ "dispatch_after should not be called in tests. \
+ Use BackgroundExecutor::timer() which uses the scheduler's native timer."
+ );
+}
+```
+
+In tests, `TestScheduler::timer()` creates native timers without using `dispatch_after`. Any code hitting this panic has a bug.
+
+---
+
+## Files Changed
+
+Key files modified during integration:
+
+- `crates/scheduler/src/scheduler.rs` - `Scheduler::block()` now takes `Pin<&mut dyn Future>` and returns `bool`
+- `crates/scheduler/src/executor.rs` - Added `from_async_task()`
+- `crates/scheduler/src/test_scheduler.rs` - Deterministic scheduling implementation
+- `crates/gpui/src/executor.rs` - Rewritten to use scheduler executors
+- `crates/gpui/src/platform_scheduler.rs` - New file implementing `Scheduler` for production
+- `crates/gpui/src/platform/test/dispatcher.rs` - Simplified to delegate to TestScheduler
+- `crates/gpui/src/platform.rs` - Simplified `RunnableVariant`, removed `TaskLabel`
+- Platform dispatchers (mac/linux/windows) - Removed label parameter from dispatch
+
+---
+
+## Future Considerations
+
+1. **Foreground priority support**: If needed, add `schedule_foreground_with_priority` to the `Scheduler` trait.
+
+2. **Profiler integration in scheduler**: Could move task timing into the scheduler crate for more consistent profiling.
+
+3. **Additional test utilities**: The `TestScheduler` could be extended with more debugging/introspection capabilities.
@@ -1,5 +1,4 @@
-use crate::{Scheduler, SessionId, Timer};
-use futures::FutureExt as _;
+use crate::{Priority, RunnableMeta, Scheduler, SessionId, Timer};
use std::{
future::Future,
marker::PhantomData,
@@ -7,16 +6,20 @@ use std::{
panic::Location,
pin::Pin,
rc::Rc,
- sync::Arc,
+ sync::{
+ Arc,
+ atomic::{AtomicBool, Ordering},
+ },
task::{Context, Poll},
thread::{self, ThreadId},
- time::Duration,
+ time::{Duration, Instant},
};
#[derive(Clone)]
pub struct ForegroundExecutor {
session_id: SessionId,
scheduler: Arc<dyn Scheduler>,
+ closed: Arc<AtomicBool>,
not_send: PhantomData<Rc<()>>,
}
@@ -25,10 +28,29 @@ impl ForegroundExecutor {
Self {
session_id,
scheduler,
+ closed: Arc::new(AtomicBool::new(false)),
not_send: PhantomData,
}
}
+ pub fn session_id(&self) -> SessionId {
+ self.session_id
+ }
+
+ pub fn scheduler(&self) -> &Arc<dyn Scheduler> {
+ &self.scheduler
+ }
+
+ /// Returns the closed flag for this executor.
+ pub fn closed(&self) -> &Arc<AtomicBool> {
+ &self.closed
+ }
+
+ /// Close this executor. Tasks will not run after this is called.
+ pub fn close(&self) {
+ self.closed.store(true, Ordering::SeqCst);
+ }
+
#[track_caller]
pub fn spawn<F>(&self, future: F) -> Task<F::Output>
where
@@ -37,61 +59,122 @@ impl ForegroundExecutor {
{
let session_id = self.session_id;
let scheduler = Arc::clone(&self.scheduler);
- let (runnable, task) = spawn_local_with_source_location(future, move |runnable| {
- scheduler.schedule_foreground(session_id, runnable);
- });
+ let location = Location::caller();
+ let closed = self.closed.clone();
+ let (runnable, task) = spawn_local_with_source_location(
+ future,
+ move |runnable| {
+ scheduler.schedule_foreground(session_id, runnable);
+ },
+ RunnableMeta { location, closed },
+ );
runnable.schedule();
Task(TaskState::Spawned(task))
}
pub fn block_on<Fut: Future>(&self, future: Fut) -> Fut::Output {
- let mut output = None;
- self.scheduler.block(
- Some(self.session_id),
- async { output = Some(future.await) }.boxed_local(),
- None,
- );
- output.unwrap()
+ use std::cell::Cell;
+
+ let output = Cell::new(None);
+ let future = async {
+ output.set(Some(future.await));
+ };
+ let mut future = std::pin::pin!(future);
+
+ self.scheduler
+ .block(Some(self.session_id), future.as_mut(), None);
+
+ output.take().expect("block_on future did not complete")
}
- pub fn block_with_timeout<Fut: Unpin + Future>(
+ /// Block until the future completes or timeout occurs.
+ /// Returns Ok(output) if completed, Err(future) if timed out.
+ pub fn block_with_timeout<Fut: Future>(
&self,
timeout: Duration,
- mut future: Fut,
- ) -> Result<Fut::Output, Fut> {
- let mut output = None;
- self.scheduler.block(
- Some(self.session_id),
- async { output = Some((&mut future).await) }.boxed_local(),
- Some(timeout),
- );
- output.ok_or(future)
+ future: Fut,
+ ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
+ use std::cell::Cell;
+
+ let output = Cell::new(None);
+ let mut future = Box::pin(future);
+
+ {
+ let future_ref = &mut future;
+ let wrapper = async {
+ output.set(Some(future_ref.await));
+ };
+ let mut wrapper = std::pin::pin!(wrapper);
+
+ self.scheduler
+ .block(Some(self.session_id), wrapper.as_mut(), Some(timeout));
+ }
+
+ match output.take() {
+ Some(value) => Ok(value),
+ None => Err(future),
+ }
}
pub fn timer(&self, duration: Duration) -> Timer {
self.scheduler.timer(duration)
}
+
+ pub fn now(&self) -> Instant {
+ self.scheduler.clock().now()
+ }
}
#[derive(Clone)]
pub struct BackgroundExecutor {
scheduler: Arc<dyn Scheduler>,
+ closed: Arc<AtomicBool>,
}
impl BackgroundExecutor {
pub fn new(scheduler: Arc<dyn Scheduler>) -> Self {
- Self { scheduler }
+ Self {
+ scheduler,
+ closed: Arc::new(AtomicBool::new(false)),
+ }
}
+ /// Returns the closed flag for this executor.
+ pub fn closed(&self) -> &Arc<AtomicBool> {
+ &self.closed
+ }
+
+ /// Close this executor. Tasks will not run after this is called.
+ pub fn close(&self) {
+ self.closed.store(true, Ordering::SeqCst);
+ }
+
+ #[track_caller]
pub fn spawn<F>(&self, future: F) -> Task<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ self.spawn_with_priority(Priority::default(), future)
+ }
+
+ #[track_caller]
+ pub fn spawn_with_priority<F>(&self, priority: Priority, future: F) -> Task<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
let scheduler = Arc::clone(&self.scheduler);
- let (runnable, task) = async_task::spawn(future, move |runnable| {
- scheduler.schedule_background(runnable);
- });
+ let location = Location::caller();
+ let closed = self.closed.clone();
+ let (runnable, task) = async_task::Builder::new()
+ .metadata(RunnableMeta { location, closed })
+ .spawn(
+ move |_| future,
+ move |runnable| {
+ scheduler.schedule_background_with_priority(runnable, priority);
+ },
+ );
runnable.schedule();
Task(TaskState::Spawned(task))
}
@@ -100,6 +183,10 @@ impl BackgroundExecutor {
self.scheduler.timer(duration)
}
+ pub fn now(&self) -> Instant {
+ self.scheduler.clock().now()
+ }
+
pub fn scheduler(&self) -> &Arc<dyn Scheduler> {
&self.scheduler
}
@@ -121,7 +208,7 @@ enum TaskState<T> {
Ready(Option<T>),
/// A task that is currently running.
- Spawned(async_task::Task<T>),
+ Spawned(async_task::Task<T, RunnableMeta>),
}
impl<T> Task<T> {
@@ -130,6 +217,11 @@ impl<T> Task<T> {
Task(TaskState::Ready(Some(val)))
}
+ /// Creates a Task from an async_task::Task
+ pub fn from_async_task(task: async_task::Task<T, RunnableMeta>) -> Self {
+ Task(TaskState::Spawned(task))
+ }
+
pub fn is_ready(&self) -> bool {
match &self.0 {
TaskState::Ready(_) => true,
@@ -144,6 +236,63 @@ impl<T> Task<T> {
Task(TaskState::Spawned(task)) => task.detach(),
}
}
+
+ /// Converts this task into a fallible task that returns `Option<T>`.
+ pub fn fallible(self) -> FallibleTask<T> {
+ FallibleTask(match self.0 {
+ TaskState::Ready(val) => FallibleTaskState::Ready(val),
+ TaskState::Spawned(task) => FallibleTaskState::Spawned(task.fallible()),
+ })
+ }
+}
+
+/// A task that returns `Option<T>` instead of panicking when cancelled.
+#[must_use]
+pub struct FallibleTask<T>(FallibleTaskState<T>);
+
+enum FallibleTaskState<T> {
+ /// A task that is ready to return a value
+ Ready(Option<T>),
+
+ /// A task that is currently running (wraps async_task::FallibleTask).
+ Spawned(async_task::FallibleTask<T, RunnableMeta>),
+}
+
+impl<T> FallibleTask<T> {
+ /// Creates a new fallible task that will resolve with the value.
+ pub fn ready(val: T) -> Self {
+ FallibleTask(FallibleTaskState::Ready(Some(val)))
+ }
+
+ /// Detaching a task runs it to completion in the background.
+ pub fn detach(self) {
+ match self.0 {
+ FallibleTaskState::Ready(_) => {}
+ FallibleTaskState::Spawned(task) => task.detach(),
+ }
+ }
+}
+
+impl<T> Future for FallibleTask<T> {
+ type Output = Option<T>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+ match unsafe { self.get_unchecked_mut() } {
+ FallibleTask(FallibleTaskState::Ready(val)) => Poll::Ready(val.take()),
+ FallibleTask(FallibleTaskState::Spawned(task)) => Pin::new(task).poll(cx),
+ }
+ }
+}
+
+impl<T> std::fmt::Debug for FallibleTask<T> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match &self.0 {
+ FallibleTaskState::Ready(_) => f.debug_tuple("FallibleTask::Ready").finish(),
+ FallibleTaskState::Spawned(task) => {
+ f.debug_tuple("FallibleTask::Spawned").field(task).finish()
+ }
+ }
+ }
}
impl<T> Future for Task<T> {
@@ -158,18 +307,19 @@ impl<T> Future for Task<T> {
}
/// Variant of `async_task::spawn_local` that includes the source location of the spawn in panics.
-///
-/// Copy-modified from:
-/// <https://github.com/smol-rs/async-task/blob/ca9dbe1db9c422fd765847fa91306e30a6bb58a9/src/runnable.rs#L405>
#[track_caller]
fn spawn_local_with_source_location<Fut, S>(
future: Fut,
schedule: S,
-) -> (async_task::Runnable, async_task::Task<Fut::Output, ()>)
+ metadata: RunnableMeta,
+) -> (
+ async_task::Runnable<RunnableMeta>,
+ async_task::Task<Fut::Output, RunnableMeta>,
+)
where
Fut: Future + 'static,
Fut::Output: 'static,
- S: async_task::Schedule + Send + Sync + 'static,
+ S: async_task::Schedule<RunnableMeta> + Send + Sync + 'static,
{
#[inline]
fn thread_id() -> ThreadId {
@@ -212,12 +362,18 @@ where
}
}
- // Wrap the future into one that checks which thread it's on.
- let future = Checked {
- id: thread_id(),
- inner: ManuallyDrop::new(future),
- location: Location::caller(),
- };
+ let location = metadata.location;
- unsafe { async_task::spawn_unchecked(future, schedule) }
+ unsafe {
+ async_task::Builder::new()
+ .metadata(metadata)
+ .spawn_unchecked(
+ move |_| Checked {
+ id: thread_id(),
+ inner: ManuallyDrop::new(future),
+ location,
+ },
+ schedule,
+ )
+ }
}
@@ -9,32 +9,116 @@ pub use executor::*;
pub use test_scheduler::*;
use async_task::Runnable;
-use futures::{FutureExt as _, channel::oneshot, future::LocalBoxFuture};
+use futures::channel::oneshot;
use std::{
future::Future,
+ panic::Location,
pin::Pin,
- sync::Arc,
+ sync::{
+ Arc,
+ atomic::{AtomicBool, Ordering},
+ },
task::{Context, Poll},
time::Duration,
};
+/// Task priority for background tasks.
+///
+/// Higher priority tasks are more likely to be scheduled before lower priority tasks,
+/// but this is not a strict guarantee - the scheduler may interleave tasks of different
+/// priorities to prevent starvation.
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
+#[repr(u8)]
+pub enum Priority {
+ /// Realtime priority
+ ///
+ /// Spawning a task with this priority will spin it off on a separate thread dedicated just to that task. Only use for audio.
+ RealtimeAudio,
+ /// High priority - use for tasks critical to user experience/responsiveness.
+ High,
+ /// Medium priority - suitable for most use cases.
+ #[default]
+ Medium,
+ /// Low priority - use for background work that can be deprioritized.
+ Low,
+}
+
+impl Priority {
+ /// Returns the relative probability weight for this priority level.
+ /// Used by schedulers to determine task selection probability.
+ pub const fn weight(self) -> u32 {
+ match self {
+ Priority::High => 60,
+ Priority::Medium => 30,
+ Priority::Low => 10,
+ // realtime priorities are not considered for probability scheduling
+ Priority::RealtimeAudio => 0,
+ }
+ }
+}
+
+/// Metadata attached to runnables for debugging and profiling.
+#[derive(Clone)]
+pub struct RunnableMeta {
+ /// The source location where the task was spawned.
+ pub location: &'static Location<'static>,
+ /// Shared flag indicating whether the scheduler has been closed.
+ /// When true, tasks should be dropped without running.
+ pub closed: Arc<AtomicBool>,
+}
+
+impl RunnableMeta {
+ /// Returns true if the scheduler has been closed and this task should not run.
+ pub fn is_closed(&self) -> bool {
+ self.closed.load(Ordering::SeqCst)
+ }
+}
+
+impl std::fmt::Debug for RunnableMeta {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("RunnableMeta")
+ .field("location", &self.location)
+ .field("closed", &self.is_closed())
+ .finish()
+ }
+}
+
pub trait Scheduler: Send + Sync {
+ /// Block until the given future completes or timeout occurs.
+ ///
+ /// Returns `true` if the future completed, `false` if it timed out.
+ /// The future is passed as a pinned mutable reference so the caller
+ /// retains ownership and can continue polling or return it on timeout.
fn block(
&self,
session_id: Option<SessionId>,
- future: LocalBoxFuture<()>,
+ future: Pin<&mut dyn Future<Output = ()>>,
timeout: Option<Duration>,
+ ) -> bool;
+
+ fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable<RunnableMeta>);
+
+ /// Schedule a background task with the given priority.
+ fn schedule_background_with_priority(
+ &self,
+ runnable: Runnable<RunnableMeta>,
+ priority: Priority,
);
- fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable);
- fn schedule_background(&self, runnable: Runnable);
+
+ /// Schedule a background task with default (medium) priority.
+ fn schedule_background(&self, runnable: Runnable<RunnableMeta>) {
+ self.schedule_background_with_priority(runnable, Priority::default());
+ }
+
fn timer(&self, timeout: Duration) -> Timer;
fn clock(&self) -> Arc<dyn Clock>;
- fn as_test(&self) -> &TestScheduler {
- panic!("this is not a test scheduler")
+
+ fn as_test(&self) -> Option<&TestScheduler> {
+ None
}
}
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SessionId(u16);
impl SessionId {
@@ -55,7 +139,7 @@ impl Future for Timer {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> {
- match self.0.poll_unpin(cx) {
+ match Pin::new(&mut self.0).poll(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => Poll::Pending,
}
@@ -1,14 +1,18 @@
use crate::{
- BackgroundExecutor, Clock, ForegroundExecutor, Scheduler, SessionId, TestClock, Timer,
+ BackgroundExecutor, Clock, ForegroundExecutor, Priority, RunnableMeta, Scheduler, SessionId,
+ TestClock, Timer,
};
use async_task::Runnable;
use backtrace::{Backtrace, BacktraceFrame};
-use futures::{FutureExt as _, channel::oneshot, future::LocalBoxFuture};
-use parking_lot::Mutex;
-use rand::prelude::*;
+use futures::channel::oneshot;
+use parking_lot::{Mutex, MutexGuard};
+use rand::{
+ distr::{StandardUniform, uniform::SampleRange, uniform::SampleUniform},
+ prelude::*,
+};
use std::{
any::type_name_of_val,
- collections::{BTreeMap, VecDeque},
+ collections::{BTreeMap, HashSet, VecDeque},
env,
fmt::Write,
future::Future,
@@ -90,6 +94,7 @@ impl TestScheduler {
capture_pending_traces: config.capture_pending_traces,
pending_traces: BTreeMap::new(),
next_trace_id: TraceId(0),
+ is_main_thread: true,
})),
clock: Arc::new(TestClock::new()),
thread: thread::current(),
@@ -100,8 +105,8 @@ impl TestScheduler {
self.clock.clone()
}
- pub fn rng(&self) -> Arc<Mutex<StdRng>> {
- self.rng.clone()
+ pub fn rng(&self) -> SharedRng {
+ SharedRng(self.rng.clone())
}
pub fn set_timeout_ticks(&self, timeout_ticks: RangeInclusive<usize>) {
@@ -116,13 +121,25 @@ impl TestScheduler {
self.state.lock().allow_parking = false;
}
+ pub fn parking_allowed(&self) -> bool {
+ self.state.lock().allow_parking
+ }
+
+ pub fn is_main_thread(&self) -> bool {
+ self.state.lock().is_main_thread
+ }
+
+ /// Allocate a new session ID for foreground task scheduling.
+ /// This is used by GPUI's TestDispatcher to map dispatcher instances to sessions.
+ pub fn allocate_session_id(&self) -> SessionId {
+ let mut state = self.state.lock();
+ state.next_session_id.0 += 1;
+ state.next_session_id
+ }
+
/// Create a foreground executor for this scheduler
pub fn foreground(self: &Arc<Self>) -> ForegroundExecutor {
- let session_id = {
- let mut state = self.state.lock();
- state.next_session_id.0 += 1;
- state.next_session_id
- };
+ let session_id = self.allocate_session_id();
ForegroundExecutor::new(session_id, self.clone())
}
@@ -152,38 +169,159 @@ impl TestScheduler {
}
}
+ /// Execute one tick of the scheduler, processing expired timers and running
+ /// at most one task. Returns true if any work was done.
+ ///
+ /// This is the public interface for GPUI's TestDispatcher to drive task execution.
+ pub fn tick(&self) -> bool {
+ self.step_filtered(false)
+ }
+
+ /// Execute one tick, but only run background tasks (no foreground/session tasks).
+ /// Returns true if any work was done.
+ pub fn tick_background_only(&self) -> bool {
+ self.step_filtered(true)
+ }
+
+ /// Check if there are any pending tasks or timers that could run.
+ pub fn has_pending_tasks(&self) -> bool {
+ let state = self.state.lock();
+ !state.runnables.is_empty() || !state.timers.is_empty()
+ }
+
+ /// Returns counts of (foreground_tasks, background_tasks) currently queued.
+ /// Foreground tasks are those with a session_id, background tasks have none.
+ pub fn pending_task_counts(&self) -> (usize, usize) {
+ let state = self.state.lock();
+ let foreground = state
+ .runnables
+ .iter()
+ .filter(|r| r.session_id.is_some())
+ .count();
+ let background = state
+ .runnables
+ .iter()
+ .filter(|r| r.session_id.is_none())
+ .count();
+ (foreground, background)
+ }
+
fn step(&self) -> bool {
- let elapsed_timers = {
+ self.step_filtered(false)
+ }
+
+ fn step_filtered(&self, background_only: bool) -> bool {
+ let (elapsed_count, runnables_before) = {
let mut state = self.state.lock();
let end_ix = state
.timers
.partition_point(|timer| timer.expiration <= self.clock.now());
- state.timers.drain(..end_ix).collect::<Vec<_>>()
+ let elapsed: Vec<_> = state.timers.drain(..end_ix).collect();
+ let count = elapsed.len();
+ let runnables = state.runnables.len();
+ drop(state);
+ // Dropping elapsed timers here wakes the waiting futures
+ drop(elapsed);
+ (count, runnables)
};
- if !elapsed_timers.is_empty() {
+ if elapsed_count > 0 {
+ let runnables_after = self.state.lock().runnables.len();
+ if std::env::var("DEBUG_SCHEDULER").is_ok() {
+ eprintln!(
+ "[scheduler] Expired {} timers at {:?}, runnables: {} -> {}",
+ elapsed_count,
+ self.clock.now(),
+ runnables_before,
+ runnables_after
+ );
+ }
return true;
}
let runnable = {
let state = &mut *self.state.lock();
- let ix = state.runnables.iter().position(|runnable| {
- runnable
- .session_id
- .is_none_or(|session_id| !state.blocked_sessions.contains(&session_id))
- });
- ix.and_then(|ix| state.runnables.remove(ix))
+
+ // Find candidate tasks:
+ // - For foreground tasks (with session_id), only the first task from each session
+ // is a candidate (to preserve intra-session ordering)
+ // - For background tasks (no session_id), all are candidates
+ // - Tasks from blocked sessions are excluded
+ // - If background_only is true, skip foreground tasks entirely
+ let mut seen_sessions = HashSet::new();
+ let candidate_indices: Vec<usize> = state
+ .runnables
+ .iter()
+ .enumerate()
+ .filter(|(_, runnable)| {
+ if let Some(session_id) = runnable.session_id {
+ // Skip foreground tasks if background_only mode
+ if background_only {
+ return false;
+ }
+ // Exclude tasks from blocked sessions
+ if state.blocked_sessions.contains(&session_id) {
+ return false;
+ }
+ // Only include first task from each session (insert returns true if new)
+ seen_sessions.insert(session_id)
+ } else {
+ // Background tasks are always candidates
+ true
+ }
+ })
+ .map(|(ix, _)| ix)
+ .collect();
+
+ if candidate_indices.is_empty() {
+ None
+ } else if state.randomize_order {
+ // Use priority-weighted random selection
+ let weights: Vec<u32> = candidate_indices
+ .iter()
+ .map(|&ix| state.runnables[ix].priority.weight())
+ .collect();
+ let total_weight: u32 = weights.iter().sum();
+
+ if total_weight == 0 {
+ // Fallback to uniform random if all weights are zero
+ let choice = self.rng.lock().random_range(0..candidate_indices.len());
+ state.runnables.remove(candidate_indices[choice])
+ } else {
+ let mut target = self.rng.lock().random_range(0..total_weight);
+ let mut selected_idx = 0;
+ for (i, &weight) in weights.iter().enumerate() {
+ if target < weight {
+ selected_idx = i;
+ break;
+ }
+ target -= weight;
+ }
+ state.runnables.remove(candidate_indices[selected_idx])
+ }
+ } else {
+ // Non-randomized: just take the first candidate task
+ state.runnables.remove(candidate_indices[0])
+ }
};
if let Some(runnable) = runnable {
+ // Check if the executor that spawned this task was closed
+ if runnable.runnable.metadata().is_closed() {
+ return true;
+ }
+ let is_foreground = runnable.session_id.is_some();
+ let was_main_thread = self.state.lock().is_main_thread;
+ self.state.lock().is_main_thread = is_foreground;
runnable.run();
+ self.state.lock().is_main_thread = was_main_thread;
return true;
}
false
}
- fn advance_clock_to_next_timer(&self) -> bool {
+ pub fn advance_clock_to_next_timer(&self) -> bool {
if let Some(timer) = self.state.lock().timers.first() {
self.clock.advance(timer.expiration - self.clock.now());
true
@@ -193,18 +331,41 @@ impl TestScheduler {
}
pub fn advance_clock(&self, duration: Duration) {
- let next_now = self.clock.now() + duration;
+ let debug = std::env::var("DEBUG_SCHEDULER").is_ok();
+ let start = self.clock.now();
+ let next_now = start + duration;
+ if debug {
+ let timer_count = self.state.lock().timers.len();
+ eprintln!(
+ "[scheduler] advance_clock({:?}) from {:?}, {} pending timers",
+ duration, start, timer_count
+ );
+ }
loop {
self.run();
if let Some(timer) = self.state.lock().timers.first()
&& timer.expiration <= next_now
{
- self.clock.advance(timer.expiration - self.clock.now());
+ let advance_to = timer.expiration;
+ if debug {
+ eprintln!(
+ "[scheduler] Advancing clock {:?} -> {:?} for timer",
+ self.clock.now(),
+ advance_to
+ );
+ }
+ self.clock.advance(advance_to - self.clock.now());
} else {
break;
}
}
self.clock.advance(next_now - self.clock.now());
+ if debug {
+ eprintln!(
+ "[scheduler] advance_clock done, now at {:?}",
+ self.clock.now()
+ );
+ }
}
fn park(&self, deadline: Option<Instant>) -> bool {
@@ -245,9 +406,9 @@ impl Scheduler for TestScheduler {
fn block(
&self,
session_id: Option<SessionId>,
- mut future: LocalBoxFuture<()>,
+ mut future: Pin<&mut dyn Future<Output = ()>>,
timeout: Option<Duration>,
- ) {
+ ) -> bool {
if let Some(session_id) = session_id {
self.state.lock().blocked_sessions.push(session_id);
}
@@ -270,10 +431,15 @@ impl Scheduler for TestScheduler {
};
let mut cx = Context::from_waker(&waker);
+ let mut completed = false;
for _ in 0..max_ticks {
- let Poll::Pending = future.poll_unpin(&mut cx) else {
- break;
- };
+ match future.as_mut().poll(&mut cx) {
+ Poll::Ready(()) => {
+ completed = true;
+ break;
+ }
+ Poll::Pending => {}
+ }
let mut stepped = None;
while self.rng.lock().random() {
@@ -297,9 +463,11 @@ impl Scheduler for TestScheduler {
if session_id.is_some() {
self.state.lock().blocked_sessions.pop();
}
+
+ completed
}
- fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable) {
+ fn schedule_foreground(&self, session_id: SessionId, runnable: Runnable<RunnableMeta>) {
let mut state = self.state.lock();
let ix = if state.randomize_order {
let start_ix = state
@@ -317,6 +485,7 @@ impl Scheduler for TestScheduler {
ix,
ScheduledRunnable {
session_id: Some(session_id),
+ priority: Priority::default(),
runnable,
},
);
@@ -324,7 +493,11 @@ impl Scheduler for TestScheduler {
self.thread.unpark();
}
- fn schedule_background(&self, runnable: Runnable) {
+ fn schedule_background_with_priority(
+ &self,
+ runnable: Runnable<RunnableMeta>,
+ priority: Priority,
+ ) {
let mut state = self.state.lock();
let ix = if state.randomize_order {
self.rng.lock().random_range(0..=state.runnables.len())
@@ -335,6 +508,7 @@ impl Scheduler for TestScheduler {
ix,
ScheduledRunnable {
session_id: None,
+ priority,
runnable,
},
);
@@ -357,8 +531,8 @@ impl Scheduler for TestScheduler {
self.clock.clone()
}
- fn as_test(&self) -> &TestScheduler {
- self
+ fn as_test(&self) -> Option<&TestScheduler> {
+ Some(self)
}
}
@@ -395,7 +569,8 @@ impl Default for TestSchedulerConfig {
struct ScheduledRunnable {
session_id: Option<SessionId>,
- runnable: Runnable,
+ priority: Priority,
+ runnable: Runnable<RunnableMeta>,
}
impl ScheduledRunnable {
@@ -420,6 +595,7 @@ struct SchedulerState {
capture_pending_traces: bool,
next_trace_id: TraceId,
pending_traces: BTreeMap<TraceId, Backtrace>,
+ is_main_thread: bool,
}
const WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
@@ -508,6 +684,46 @@ impl TracingWaker {
pub struct Yield(usize);
+/// A wrapper around `Arc<Mutex<StdRng>>` that provides convenient methods
+/// for random number generation without requiring explicit locking.
+#[derive(Clone)]
+pub struct SharedRng(Arc<Mutex<StdRng>>);
+
+impl SharedRng {
+ /// Lock the inner RNG for direct access. Use this when you need multiple
+ /// random operations without re-locking between each one.
+ pub fn lock(&self) -> MutexGuard<'_, StdRng> {
+ self.0.lock()
+ }
+
+ /// Generate a random value in the given range.
+ pub fn random_range<T, R>(&self, range: R) -> T
+ where
+ T: SampleUniform,
+ R: SampleRange<T>,
+ {
+ self.0.lock().random_range(range)
+ }
+
+ /// Generate a random boolean with the given probability of being true.
+ pub fn random_bool(&self, p: f64) -> bool {
+ self.0.lock().random_bool(p)
+ }
+
+ /// Generate a random value of the given type.
+ pub fn random<T>(&self) -> T
+ where
+ StandardUniform: Distribution<T>,
+ {
+ self.0.lock().random()
+ }
+
+ /// Generate a random ratio - true with probability `numerator/denominator`.
+ pub fn random_ratio(&self, numerator: u32, denominator: u32) -> bool {
+ self.0.lock().random_ratio(numerator, denominator)
+ }
+}
+
impl Future for Yield {
type Output = ();
@@ -238,7 +238,7 @@ fn test_block() {
}
#[test]
-#[should_panic(expected = "futures_channel::oneshot::Inner")]
+#[should_panic(expected = "Parking forbidden. Pending traces:")]
fn test_parking_panics() {
let config = TestSchedulerConfig {
capture_pending_traces: true,
@@ -297,20 +297,27 @@ fn test_block_with_timeout() {
let foreground = scheduler.foreground();
let future = future::ready(42);
let output = foreground.block_with_timeout(Duration::from_millis(100), future);
- assert_eq!(output.unwrap(), 42);
+ assert_eq!(output.ok(), Some(42));
});
// Test case: future times out
TestScheduler::once(async |scheduler| {
+ // Make timeout behavior deterministic by forcing the timeout tick budget to be exactly 0.
+ // This prevents `block_with_timeout` from making progress via extra scheduler stepping and
+ // accidentally completing work that we expect to time out.
+ scheduler.set_timeout_ticks(0..=0);
+
let foreground = scheduler.foreground();
let future = future::pending::<()>();
let output = foreground.block_with_timeout(Duration::from_millis(50), future);
- let _ = output.expect_err("future should not have finished");
+ assert!(output.is_err(), "future should not have finished");
});
// Test case: future makes progress via timer but still times out
let mut results = BTreeSet::new();
TestScheduler::many(100, async |scheduler| {
+ // Keep the existing probabilistic behavior here (do not force 0 ticks), since this subtest
+ // is explicitly checking that some seeds/timeouts can complete while others can time out.
let task = scheduler.background().spawn(async move {
Yield { polls: 10 }.await;
42
@@ -324,6 +331,44 @@ fn test_block_with_timeout() {
results.into_iter().collect::<Vec<_>>(),
vec![None, Some(42)]
);
+
+ // Regression test:
+ // A timed-out future must not be cancelled. The returned future should still be
+ // pollable to completion later. We also want to ensure time only advances when we
+ // explicitly advance it (not by yielding).
+ TestScheduler::once(async |scheduler| {
+ // Force immediate timeout: the timeout tick budget is 0 so we will not step or
+ // advance timers inside `block_with_timeout`.
+ scheduler.set_timeout_ticks(0..=0);
+
+ let background = scheduler.background();
+
+ // This task should only complete once time is explicitly advanced.
+ let task = background.spawn({
+ let scheduler = scheduler.clone();
+ async move {
+ scheduler.timer(Duration::from_millis(100)).await;
+ 123
+ }
+ });
+
+ // This should time out before we advance time enough for the timer to fire.
+ let timed_out = scheduler
+ .foreground()
+ .block_with_timeout(Duration::from_millis(50), task);
+ assert!(
+ timed_out.is_err(),
+ "expected timeout before advancing the clock enough for the timer"
+ );
+
+ // Now explicitly advance time and ensure the returned future can complete.
+ let mut task = timed_out.err().unwrap();
+ scheduler.advance_clock(Duration::from_millis(100));
+ scheduler.run();
+
+ let output = scheduler.foreground().block_on(&mut task);
+ assert_eq!(output, 123);
+ });
}
// When calling block, we shouldn't make progress on foreground-spawned futures with the same session id.
@@ -370,3 +415,64 @@ impl Future for Yield {
}
}
}
+
+#[test]
+fn test_background_priority_scheduling() {
+ use parking_lot::Mutex;
+
+ // Run many iterations to get statistical significance
+ let mut high_before_low_count = 0;
+ let iterations = 100;
+
+ for seed in 0..iterations {
+ let config = TestSchedulerConfig::with_seed(seed);
+ let scheduler = Arc::new(TestScheduler::new(config));
+ let background = scheduler.background();
+
+ let execution_order = Arc::new(Mutex::new(Vec::new()));
+
+ // Spawn low priority tasks first
+ for i in 0..3 {
+ let order = execution_order.clone();
+ background
+ .spawn_with_priority(Priority::Low, async move {
+ order.lock().push(format!("low-{}", i));
+ })
+ .detach();
+ }
+
+ // Spawn high priority tasks second
+ for i in 0..3 {
+ let order = execution_order.clone();
+ background
+ .spawn_with_priority(Priority::High, async move {
+ order.lock().push(format!("high-{}", i));
+ })
+ .detach();
+ }
+
+ scheduler.run();
+
+ // Count how many high priority tasks ran in the first half
+ let order = execution_order.lock();
+ let high_in_first_half = order
+ .iter()
+ .take(3)
+ .filter(|s| s.starts_with("high"))
+ .count();
+
+ if high_in_first_half >= 2 {
+ high_before_low_count += 1;
+ }
+ }
+
+ // High priority tasks should tend to run before low priority tasks
+ // With weights of 60 vs 10, high priority should dominate early execution
+ assert!(
+ high_before_low_count > iterations / 2,
+ "Expected high priority tasks to run before low priority tasks more often. \
+ Got {} out of {} iterations",
+ high_before_low_count,
+ iterations
+ );
+}
@@ -93,8 +93,8 @@ impl PickerDelegate for Delegate {
) -> Task<()> {
let candidates = self.candidates.clone();
self.matches = cx
- .background_executor()
- .block(fuzzy::match_strings(
+ .foreground_executor()
+ .block_on(fuzzy::match_strings(
&candidates,
&query,
true,
@@ -2750,7 +2750,7 @@ mod tests {
})
})
.detach();
- cx.background_spawn(async move {
+ let completion_check_task = cx.background_spawn(async move {
// The channel may be closed if the terminal is dropped before sending
// the completion signal, which can happen with certain task scheduling orders.
let exit_status = completion_rx.recv().await.ok().flatten();
@@ -2764,8 +2764,7 @@ mod tests {
#[cfg(not(target_os = "windows"))]
assert_eq!(exit_status.code(), None);
}
- })
- .detach();
+ });
let mut all_events = Vec::new();
while let Ok(Ok(new_event)) =
@@ -2780,6 +2779,8 @@ mod tests {
.any(|event| event == &Event::CloseTerminal),
"Wrong shell command should update the title but not should not close the terminal to show the error message, but got events: {all_events:?}",
);
+
+ completion_check_task.await;
}
#[test]
@@ -11541,6 +11541,9 @@ mod tests {
window,
cx,
);
+ });
+ cx.run_until_parked();
+ workspace.update_in(cx, |workspace, window, cx| {
workspace.move_item_to_pane_at_index(
&MoveItemToPane {
destination: 3,
@@ -252,7 +252,6 @@ pub struct LocalSnapshot {
/// The file handle of the worktree root
/// (so we can find it after it's been moved)
root_file_handle: Option<Arc<dyn fs::FileHandle>>,
- executor: BackgroundExecutor,
}
struct BackgroundScannerState {
@@ -418,7 +417,6 @@ impl Worktree {
PathStyle::local(),
),
root_file_handle,
- executor: cx.background_executor().clone(),
};
let worktree_id = snapshot.id();
@@ -461,7 +459,8 @@ impl Worktree {
entry.is_hidden = settings.is_path_hidden(path);
}
}
- snapshot.insert_entry(entry, fs.as_ref());
+ cx.foreground_executor()
+ .block_on(snapshot.insert_entry(entry, fs.as_ref()));
}
let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
@@ -2571,11 +2570,11 @@ impl LocalSnapshot {
}
}
- fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
+ async fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
log::trace!("insert entry {:?}", entry.path);
if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
let abs_path = self.absolutize(&entry.path);
- match self.executor.block(build_gitignore(&abs_path, fs)) {
+ match build_gitignore(&abs_path, fs).await {
Ok(ignore) => {
self.ignores_by_parent_abs_path
.insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
@@ -2869,7 +2868,7 @@ impl BackgroundScannerState {
}
async fn insert_entry(&mut self, entry: Entry, fs: &dyn Fs, watcher: &dyn Watcher) -> Entry {
- let entry = self.snapshot.insert_entry(entry, fs);
+ let entry = self.snapshot.insert_entry(entry, fs).await;
if entry.path.file_name() == Some(&DOT_GIT) {
self.insert_git_repository(entry.path.clone(), fs, watcher)
.await;
@@ -4827,39 +4826,40 @@ impl BackgroundScanner {
async fn ignores_needing_update(&self) -> Vec<Arc<Path>> {
let mut ignores_to_update = Vec::new();
+ let mut excludes_to_load: Vec<(Arc<Path>, PathBuf)> = Vec::new();
+ // First pass: collect updates and drop stale entries without awaiting.
{
let snapshot = &mut self.state.lock().await.snapshot;
let abs_path = snapshot.abs_path.clone();
+ let mut repo_exclude_keys_to_remove: Vec<Arc<Path>> = Vec::new();
- snapshot.repo_exclude_by_work_dir_abs_path.retain(
- |work_dir_abs_path, (exclude, needs_update)| {
- if *needs_update {
- *needs_update = false;
- ignores_to_update.push(work_dir_abs_path.clone());
+ for (work_dir_abs_path, (_, needs_update)) in
+ snapshot.repo_exclude_by_work_dir_abs_path.iter_mut()
+ {
+ let repository = snapshot
+ .git_repositories
+ .iter()
+ .find(|(_, repo)| &repo.work_directory_abs_path == work_dir_abs_path);
- if let Some((_, repository)) = snapshot
- .git_repositories
- .iter()
- .find(|(_, repo)| &repo.work_directory_abs_path == work_dir_abs_path)
- {
- let exclude_abs_path =
- repository.common_dir_abs_path.join(REPO_EXCLUDE);
- if let Ok(current_exclude) = self
- .executor
- .block(build_gitignore(&exclude_abs_path, self.fs.as_ref()))
- {
- *exclude = Arc::new(current_exclude);
- }
- }
+ if *needs_update {
+ *needs_update = false;
+ ignores_to_update.push(work_dir_abs_path.clone());
+
+ if let Some((_, repository)) = repository {
+ let exclude_abs_path = repository.common_dir_abs_path.join(REPO_EXCLUDE);
+ excludes_to_load.push((work_dir_abs_path.clone(), exclude_abs_path));
}
+ }
- snapshot
- .git_repositories
- .iter()
- .any(|(_, repo)| &repo.work_directory_abs_path == work_dir_abs_path)
- },
- );
+ if repository.is_none() {
+ repo_exclude_keys_to_remove.push(work_dir_abs_path.clone());
+ }
+ }
+
+ for key in repo_exclude_keys_to_remove {
+ snapshot.repo_exclude_by_work_dir_abs_path.remove(&key);
+ }
snapshot
.ignores_by_parent_abs_path
@@ -4884,6 +4884,29 @@ impl BackgroundScanner {
});
}
+ // Load gitignores asynchronously (outside the lock)
+ let mut loaded_excludes: Vec<(Arc<Path>, Arc<Gitignore>)> = Vec::new();
+ for (work_dir_abs_path, exclude_abs_path) in excludes_to_load {
+ if let Ok(current_exclude) = build_gitignore(&exclude_abs_path, self.fs.as_ref()).await
+ {
+ loaded_excludes.push((work_dir_abs_path, Arc::new(current_exclude)));
+ }
+ }
+
+ // Second pass: apply updates.
+ if !loaded_excludes.is_empty() {
+ let snapshot = &mut self.state.lock().await.snapshot;
+
+ for (work_dir_abs_path, exclude) in loaded_excludes {
+ if let Some((existing_exclude, _)) = snapshot
+ .repo_exclude_by_work_dir_abs_path
+ .get_mut(&work_dir_abs_path)
+ {
+ *existing_exclude = exclude;
+ }
+ }
+ }
+
ignores_to_update
}
@@ -529,9 +529,9 @@ fn main() {
debugger_tools::init(cx);
client::init(&client, cx);
- let system_id = cx.background_executor().block(system_id).ok();
- let installation_id = cx.background_executor().block(installation_id).ok();
- let session = cx.background_executor().block(session);
+ let system_id = cx.foreground_executor().block_on(system_id).ok();
+ let installation_id = cx.foreground_executor().block_on(installation_id).ok();
+ let session = cx.foreground_executor().block_on(session);
let telemetry = client.telemetry();
telemetry.start(
@@ -1551,7 +1551,7 @@ fn load_embedded_fonts(cx: &App) {
let embedded_fonts = Mutex::new(Vec::new());
let executor = cx.background_executor();
- executor.block(executor.scoped(|scope| {
+ cx.foreground_executor().block_on(executor.scoped(|scope| {
for font_path in &font_paths {
if !font_path.ends_with(".ttf") {
continue;
@@ -120,7 +120,7 @@ fn save_hang_trace(
background_executor: &gpui::BackgroundExecutor,
hang_time: chrono::DateTime<chrono::Local>,
) {
- let thread_timings = background_executor.dispatcher.get_all_timings();
+ let thread_timings = background_executor.dispatcher().get_all_timings();
let thread_timings = thread_timings
.into_iter()
.map(|mut timings| {
@@ -260,7 +260,7 @@ fn run_visual_tests(project_path: PathBuf, update_baseline: bool) -> Result<()>
// worktree creation spawns foreground tasks via cx.spawn
// Allow parking since filesystem operations happen outside the test dispatcher
cx.background_executor.allow_parking();
- let worktree_result = cx.background_executor.block_test(add_worktree_task);
+ let worktree_result = cx.foreground_executor.block_test(add_worktree_task);
cx.background_executor.forbid_parking();
worktree_result.context("Failed to add worktree")?;
@@ -275,7 +275,7 @@ fn run_visual_tests(project_path: PathBuf, update_baseline: bool) -> Result<()>
cx.background_executor.allow_parking();
let panel = cx
- .background_executor
+ .foreground_executor
.block_test(ProjectPanel::load(weak_workspace, async_window_cx))
.context("Failed to load project panel")?;
cx.background_executor.forbid_parking();
@@ -316,7 +316,7 @@ fn run_visual_tests(project_path: PathBuf, update_baseline: bool) -> Result<()>
if let Some(task) = open_file_task {
cx.background_executor.allow_parking();
- let block_result = cx.background_executor.block_test(task);
+ let block_result = cx.foreground_executor.block_test(task);
cx.background_executor.forbid_parking();
if let Ok(item) = block_result {
workspace_window
@@ -912,7 +912,7 @@ fn run_breakpoint_hover_visual_tests(
.context("Failed to start adding worktree")?;
cx.background_executor.allow_parking();
- let worktree_result = cx.background_executor.block_test(add_worktree_task);
+ let worktree_result = cx.foreground_executor.block_test(add_worktree_task);
cx.background_executor.forbid_parking();
worktree_result.context("Failed to add worktree")?;
@@ -937,7 +937,7 @@ fn run_breakpoint_hover_visual_tests(
if let Some(task) = open_file_task {
cx.background_executor.allow_parking();
- let _ = cx.background_executor.block_test(task);
+ let _ = cx.foreground_executor.block_test(task);
cx.background_executor.forbid_parking();
}
@@ -1198,7 +1198,7 @@ import { AiPaneTabContext } from 'context';
});
cx.background_executor.allow_parking();
- let _ = cx.background_executor.block_test(add_worktree_task);
+ let _ = cx.foreground_executor.block_test(add_worktree_task);
cx.background_executor.forbid_parking();
cx.run_until_parked();
@@ -1333,7 +1333,7 @@ import { AiPaneTabContext } from 'context';
if let Some(task) = open_file_task {
cx.background_executor.allow_parking();
- let _ = cx.background_executor.block_test(task);
+ let _ = cx.foreground_executor.block_test(task);
cx.background_executor.forbid_parking();
}
@@ -1478,7 +1478,7 @@ fn run_agent_thread_view_test(
cx.background_executor.allow_parking();
let (worktree, _) = cx
- .background_executor
+ .foreground_executor
.block_test(add_worktree_task)
.context("Failed to add worktree")?;
cx.background_executor.forbid_parking();
@@ -1528,7 +1528,7 @@ fn run_agent_thread_view_test(
let run_task = cx.update(|cx| tool.clone().run(input, event_stream, cx));
cx.background_executor.allow_parking();
- let run_result = cx.background_executor.block_test(run_task);
+ let run_result = cx.foreground_executor.block_test(run_task);
cx.background_executor.forbid_parking();
run_result.context("ReadFileTool failed")?;
@@ -1609,7 +1609,7 @@ fn run_agent_thread_view_test(
cx.update(|cx| prompt_store::PromptBuilder::load(app_state.fs.clone(), false, cx));
cx.background_executor.allow_parking();
let panel = cx
- .background_executor
+ .foreground_executor
.block_test(AgentPanel::load(
weak_workspace,
prompt_builder,
@@ -1653,7 +1653,7 @@ fn run_agent_thread_view_test(
});
cx.background_executor.allow_parking();
- let send_result = cx.background_executor.block_test(send_future);
+ let send_result = cx.foreground_executor.block_test(send_future);
cx.background_executor.forbid_parking();
send_result.context("Failed to send message")?;
@@ -1529,12 +1529,12 @@ pub fn handle_settings_file_changes(
// Initial load of both settings files
let global_content = cx
- .background_executor()
- .block(global_settings_file_rx.next())
+ .foreground_executor()
+ .block_on(global_settings_file_rx.next())
.unwrap();
let user_content = cx
- .background_executor()
- .block(user_settings_file_rx.next())
+ .foreground_executor()
+ .block_on(user_settings_file_rx.next())
.unwrap();
SettingsStore::update_global(cx, |store, cx| {
@@ -2195,7 +2195,7 @@ pub(crate) fn eager_load_active_theme_and_icon_theme(fs: Arc<dyn Fs>, cx: &mut A
return;
}
- executor.block(executor.scoped(|scope| {
+ cx.foreground_executor().block_on(executor.scoped(|scope| {
for load_target in themes_to_load {
let theme_registry = &theme_registry;
let reload_tasks = &reload_tasks;