use crate::{App, Bigus, PlatformDispatcher};
use async_task::Runnable;
use futures::channel::mpsc;
use smol::prelude::*;
use std::mem::ManuallyDrop;
use std::panic::Location;
use std::thread::{self, ThreadId};
use std::{
    fmt::Debug,
    marker::PhantomData,
    mem,
    num::NonZeroUsize,
    pin::Pin,
    rc::Rc,
    sync::{
        Arc,
        atomic::{AtomicUsize, Ordering::SeqCst},
    },
    task::{Context, Poll},
    time::{Duration, Instant},
};
use util::TryFutureExt;
use waker_fn::waker_fn;

#[cfg(any(test, feature = "test-support"))]
use rand::rngs::StdRng;

/// A pointer to the executor that is currently running,
/// for spawning background tasks.
#[derive(Clone)]
pub struct BackgroundExecutor {
    #[doc(hidden)]
    pub dispatcher: Arc<dyn PlatformDispatcher>,
}

/// A pointer to the executor that is currently running,
/// for spawning tasks on the main thread.
///
/// This is intentionally `!Send` via the `not_send` marker field. This is because
/// `ForegroundExecutor::spawn` does not require `Send` but checks at runtime that the future is
/// only polled from the same thread it was spawned from. These checks would fail when spawning
/// foreground tasks from background threads.
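///
/// A minimal usage sketch (assuming a `ForegroundExecutor` already obtained from app context,
/// e.g. via `cx.foreground_executor()`; `do_main_thread_work` is a placeholder; not a verified
/// doctest):
/// ```ignore
/// // The future below may be `!Send`; it will only ever be polled on the main thread.
/// let task = foreground_executor.spawn(async move { do_main_thread_work() });
/// task.detach();
/// ```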
#[derive(Clone)]
pub struct ForegroundExecutor {
    #[doc(hidden)]
    pub dispatcher: Arc<dyn PlatformDispatcher>,
    not_send: PhantomData<Rc<()>>,
}

/// Task is a primitive that allows work to happen in the background.
///
/// It implements [`Future`] so you can `.await` on it.
///
/// If you drop a task it will be cancelled immediately. Calling [`Task::detach`] allows
/// the task to continue running, but with no way to return a value.
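///
/// A small illustrative sketch (assuming an executor is in scope and `expensive_computation`
/// is a placeholder; not a verified doctest):
/// ```ignore
/// let task = executor.spawn(async { expensive_computation() });
/// // Either await the result...
/// let value = task.await;
/// // ...or call `task.detach()` to let it run to completion without keeping a handle.
/// ```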
#[must_use]
#[derive(Debug)]
pub struct Task<T>(TaskState<T>);

#[derive(Debug)]
enum TaskState<T> {
    /// A task that is ready to return a value
    Ready(Option<T>),

    /// A task that is currently running.
    Spawned(async_task::Task<T, Bigus>),
}

impl<T> Task<T> {
    /// Creates a new task that will immediately resolve with the given value.
    pub fn ready(val: T) -> Self {
        Task(TaskState::Ready(Some(val)))
    }

    /// Detaching a task runs it to completion in the background.
    pub fn detach(self) {
        match self {
            Task(TaskState::Ready(_)) => {}
            Task(TaskState::Spawned(task)) => task.detach(),
        }
    }
}

impl<E, T> Task<Result<T, E>>
where
    T: 'static,
    E: 'static + Debug,
{
    /// Run the task to completion in the background and log any
    /// errors that occur.
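    ///
    /// A hedged sketch (assuming `cx: &App`, an `executor`, and an error type that implements
    /// `Debug`; not a verified doctest):
    /// ```ignore
    /// let task: Task<Result<(), std::io::Error>> = executor.spawn(async { Ok(()) });
    /// task.detach_and_log_err(cx);
    /// ```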
    #[track_caller]
    pub fn detach_and_log_err(self, cx: &App) {
        let location = core::panic::Location::caller();
        cx.foreground_executor()
            .spawn(self.log_tracked_err(*location))
            .detach();
    }
}

impl<T> Future for Task<T> {
    type Output = T;

    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        match unsafe { self.get_unchecked_mut() } {
            Task(TaskState::Ready(val)) => Poll::Ready(val.take().unwrap()),
            Task(TaskState::Spawned(task)) => task.poll(cx),
        }
    }
}

/// A task label is an opaque identifier that you can use to
/// refer to a task in tests.
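///
/// A hedged sketch of how a label might be used in a test (the `executor` binding is assumed;
/// the methods shown are defined in this module; not a verified doctest):
/// ```ignore
/// let label = TaskLabel::new();
/// executor.deprioritize(label);
/// executor
///     .spawn_labeled(label, async { /* runs after other ready work in tests */ })
///     .detach();
/// ```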
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TaskLabel(NonZeroUsize);

impl Default for TaskLabel {
    fn default() -> Self {
        Self::new()
    }
}

impl TaskLabel {
    /// Construct a new task label.
    pub fn new() -> Self {
        static NEXT_TASK_LABEL: AtomicUsize = AtomicUsize::new(1);
        Self(NEXT_TASK_LABEL.fetch_add(1, SeqCst).try_into().unwrap())
    }
}

type AnyLocalFuture<R> = Pin<Box<dyn 'static + Future<Output = R>>>;

type AnyFuture<R> = Pin<Box<dyn 'static + Send + Future<Output = R>>>;

/// BackgroundExecutor lets you run things on background threads.
/// In production this is a thread pool with no ordering guarantees.
/// In tests this is simulated by running tasks one by one in a deterministic
/// (but arbitrary) order controlled by the `SEED` environment variable.
impl BackgroundExecutor {
    #[doc(hidden)]
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self { dispatcher }
    }

    /// Enqueues the given future to be run to completion on a background thread.
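    ///
    /// A minimal sketch (assuming a `BackgroundExecutor` named `executor`; not a verified doctest):
    /// ```ignore
    /// let task = executor.spawn(async { 2 + 2 });
    /// // Either `.await` the task from another async context or call `task.detach()`.
    /// ```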
    #[track_caller]
    pub fn spawn<R>(&self, future: impl Future<Output = R> + Send + 'static) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), None)
    }

    /// Enqueues the given future to be run to completion on a background thread.
    /// The given label can be used to control the priority of the task in tests.
    #[track_caller]
    pub fn spawn_labeled<R>(
        &self,
        label: TaskLabel,
        future: impl Future<Output = R> + Send + 'static,
    ) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), Some(label))
    }

    #[track_caller]
    fn spawn_internal<R: Send + 'static>(
        &self,
        future: AnyFuture<R>,
        label: Option<TaskLabel>,
    ) -> Task<R> {
        let dispatcher = self.dispatcher.clone();
        let location = core::panic::Location::caller();
        let (runnable, task) = async_task::Builder::new()
            .metadata(Bigus { location })
            .spawn(
                move |_| future,
                move |runnable| dispatcher.dispatch(runnable, label),
            );
        runnable.schedule();
        Task(TaskState::Spawned(task))
    }

    /// Used by the test harness to run an async test in a synchronous fashion.
    #[cfg(any(test, feature = "test-support"))]
    #[track_caller]
    pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
        if let Ok(value) = self.block_internal(false, future, None) {
            value
        } else {
            unreachable!()
        }
    }

    /// Block the current thread until the given future resolves.
    /// Consider using `block_with_timeout` instead.
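    ///
    /// A minimal sketch (assuming a `BackgroundExecutor` named `executor`; not a verified doctest):
    /// ```ignore
    /// let value = executor.block(async { 1 + 1 });
    /// assert_eq!(value, 2);
    /// ```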
    pub fn block<R>(&self, future: impl Future<Output = R>) -> R {
        if let Ok(value) = self.block_internal(true, future, None) {
            value
        } else {
            unreachable!()
        }
    }

    #[cfg(not(any(test, feature = "test-support")))]
    pub(crate) fn block_internal<Fut: Future>(
        &self,
        _background_only: bool,
        future: Fut,
        timeout: Option<Duration>,
    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
        use std::time::Instant;

        let mut future = Box::pin(future);
        if timeout == Some(Duration::ZERO) {
            return Err(future);
        }
        let deadline = timeout.map(|timeout| Instant::now() + timeout);

        let parker = parking::Parker::new();
        let unparker = parker.unparker();
        let waker = waker_fn(move || {
            unparker.unpark();
        });
        let mut cx = std::task::Context::from_waker(&waker);

        loop {
            match future.as_mut().poll(&mut cx) {
                Poll::Ready(result) => return Ok(result),
                Poll::Pending => {
                    let timeout =
                        deadline.map(|deadline| deadline.saturating_duration_since(Instant::now()));
                    if let Some(timeout) = timeout {
                        if !parker.park_timeout(timeout)
                            && deadline.is_some_and(|deadline| deadline < Instant::now())
                        {
                            return Err(future);
                        }
                    } else {
                        parker.park();
                    }
                }
            }
        }
    }

    #[cfg(any(test, feature = "test-support"))]
    #[track_caller]
    pub(crate) fn block_internal<Fut: Future>(
        &self,
        background_only: bool,
        future: Fut,
        timeout: Option<Duration>,
    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
        use std::sync::atomic::AtomicBool;

        use parking::Parker;

        let mut future = Box::pin(future);
        if timeout == Some(Duration::ZERO) {
            return Err(future);
        }
        let Some(dispatcher) = self.dispatcher.as_test() else {
            return Err(future);
        };

        let mut max_ticks = if timeout.is_some() {
            dispatcher.gen_block_on_ticks()
        } else {
            usize::MAX
        };

        let parker = Parker::new();
        let unparker = parker.unparker();

        let awoken = Arc::new(AtomicBool::new(false));
        let waker = waker_fn({
            let awoken = awoken.clone();
            let unparker = unparker.clone();
            move || {
                awoken.store(true, SeqCst);
                unparker.unpark();
            }
        });
        let mut cx = std::task::Context::from_waker(&waker);

        loop {
            match future.as_mut().poll(&mut cx) {
                Poll::Ready(result) => return Ok(result),
                Poll::Pending => {
                    if max_ticks == 0 {
                        return Err(future);
                    }
                    max_ticks -= 1;

                    if !dispatcher.tick(background_only) {
                        if awoken.swap(false, SeqCst) {
                            continue;
                        }

                        if !dispatcher.parking_allowed() {
                            if dispatcher.advance_clock_to_next_delayed() {
                                continue;
                            }
                            let mut backtrace_message = String::new();
                            let mut waiting_message = String::new();
                            if let Some(backtrace) = dispatcher.waiting_backtrace() {
                                backtrace_message =
                                    format!("\nbacktrace of waiting future:\n{:?}", backtrace);
                            }
                            if let Some(waiting_hint) = dispatcher.waiting_hint() {
                                waiting_message = format!("\n waiting on: {}\n", waiting_hint);
                            }
                            panic!(
                                "parked with nothing left to run{waiting_message}{backtrace_message}",
                            )
                        }
                        dispatcher.set_unparker(unparker.clone());
                        parker.park();
                    }
                }
            }
        }
    }

    /// Block the current thread until the given future resolves
    /// or `duration` has elapsed.
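    ///
    /// A hedged sketch of the intended usage (`some_future` and the duration are placeholders;
    /// not a verified doctest):
    /// ```ignore
    /// match executor.block_with_timeout(Duration::from_millis(100), some_future) {
    ///     Ok(value) => { /* the future finished in time */ }
    ///     Err(unfinished) => { /* timed out; resume or drop the returned future */ }
    /// }
    /// ```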
    pub fn block_with_timeout<Fut: Future>(
        &self,
        duration: Duration,
        future: Fut,
    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
        self.block_internal(true, future, Some(duration))
    }

    /// Scoped lets you start a number of tasks and waits
    /// for all of them to complete before returning.
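    ///
    /// A small sketch of the intended usage (`compute_left`/`compute_right` are placeholders;
    /// not a verified doctest):
    /// ```ignore
    /// let mut results = vec![0; 2];
    /// let (left, right) = results.split_at_mut(1);
    /// executor
    ///     .scoped(|scope| {
    ///         scope.spawn(async move { left[0] = compute_left(); });
    ///         scope.spawn(async move { right[0] = compute_right(); });
    ///     })
    ///     .await;
    /// ```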
    pub async fn scoped<'scope, F>(&self, scheduler: F)
    where
        F: FnOnce(&mut Scope<'scope>),
    {
        let mut scope = Scope::new(self.clone());
        (scheduler)(&mut scope);
        let spawned = mem::take(&mut scope.futures)
            .into_iter()
            .map(|f| self.spawn(f))
            .collect::<Vec<_>>();
        for task in spawned {
            task.await;
        }
    }

    /// Get the current time.
    ///
    /// Calling this instead of `std::time::Instant::now` allows the use
    /// of fake timers in tests.
    pub fn now(&self) -> Instant {
        self.dispatcher.now()
    }

    /// Returns a task that will complete after the given duration.
    /// Depending on other concurrent tasks the elapsed duration may be longer
    /// than requested.
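    ///
    /// A minimal sketch (assuming this is awaited inside another spawned future; not a
    /// verified doctest):
    /// ```ignore
    /// executor.timer(Duration::from_secs(1)).await;
    /// ```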
    #[track_caller]
    pub fn timer(&self, duration: Duration) -> Task<()> {
        if duration.is_zero() {
            return Task::ready(());
        }
        let location = core::panic::Location::caller();
        let (runnable, task) = async_task::Builder::new()
            .metadata(Bigus { location })
            .spawn(move |_| async move {}, {
                let dispatcher = self.dispatcher.clone();
                move |runnable| dispatcher.dispatch_after(duration, runnable)
            });
        runnable.schedule();
        Task(TaskState::Spawned(task))
    }

    /// in tests, start_waiting lets you indicate which task is waiting (for debugging only)
    #[cfg(any(test, feature = "test-support"))]
    pub fn start_waiting(&self) {
        self.dispatcher.as_test().unwrap().start_waiting();
    }

    /// in tests, removes the debugging data added by start_waiting
    #[cfg(any(test, feature = "test-support"))]
    pub fn finish_waiting(&self) {
        self.dispatcher.as_test().unwrap().finish_waiting();
    }

    /// in tests, run an arbitrary number of tasks (determined by the SEED environment variable)
    #[cfg(any(test, feature = "test-support"))]
    pub fn simulate_random_delay(&self) -> impl Future<Output = ()> + use<> {
        self.dispatcher.as_test().unwrap().simulate_random_delay()
    }

    /// in tests, indicate that a given task from `spawn_labeled` should run after everything else
    #[cfg(any(test, feature = "test-support"))]
    pub fn deprioritize(&self, task_label: TaskLabel) {
        self.dispatcher.as_test().unwrap().deprioritize(task_label)
    }

    /// in tests, move time forward. This does not run any tasks, but does make `timer`s ready.
    #[cfg(any(test, feature = "test-support"))]
    pub fn advance_clock(&self, duration: Duration) {
        self.dispatcher.as_test().unwrap().advance_clock(duration)
    }

    /// in tests, run one task.
    #[cfg(any(test, feature = "test-support"))]
    pub fn tick(&self) -> bool {
        self.dispatcher.as_test().unwrap().tick(false)
    }

    /// in tests, run all tasks that are ready to run. If after doing so
    /// the test still has outstanding tasks, this will panic. (See also [`Self::allow_parking`])
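    ///
    /// A hedged sketch of a typical test flow (assumes a test `BackgroundExecutor`; not a
    /// verified doctest):
    /// ```ignore
    /// executor.spawn(async { /* ... */ }).detach();
    /// executor.run_until_parked();
    /// // At this point the spawned work has run as far as it can without external input.
    /// ```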
    #[cfg(any(test, feature = "test-support"))]
    pub fn run_until_parked(&self) {
        self.dispatcher.as_test().unwrap().run_until_parked()
    }

    /// in tests, prevents `run_until_parked` from panicking if there are outstanding tasks.
    /// This is useful when you are integrating other (non-GPUI) futures, like disk access, that
    /// do take real async time to run.
    #[cfg(any(test, feature = "test-support"))]
    pub fn allow_parking(&self) {
        self.dispatcher.as_test().unwrap().allow_parking();
    }

    /// undoes the effect of [`Self::allow_parking`].
    #[cfg(any(test, feature = "test-support"))]
    pub fn forbid_parking(&self) {
        self.dispatcher.as_test().unwrap().forbid_parking();
    }

    /// adds detail to the "parked with nothing left to run" message.
    #[cfg(any(test, feature = "test-support"))]
    pub fn set_waiting_hint(&self, msg: Option<String>) {
        self.dispatcher.as_test().unwrap().set_waiting_hint(msg);
    }

    /// in tests, returns the rng used by the dispatcher and seeded by the `SEED` environment variable
    #[cfg(any(test, feature = "test-support"))]
    pub fn rng(&self) -> StdRng {
        self.dispatcher.as_test().unwrap().rng()
    }

    /// How many CPUs are available to the dispatcher.
    pub fn num_cpus(&self) -> usize {
        #[cfg(any(test, feature = "test-support"))]
        return 4;

        #[cfg(not(any(test, feature = "test-support")))]
        return num_cpus::get();
    }

    /// Whether we're on the main thread.
    pub fn is_main_thread(&self) -> bool {
        self.dispatcher.is_main_thread()
    }

    #[cfg(any(test, feature = "test-support"))]
    /// in tests, control the number of ticks that `block_with_timeout` will run before timing out.
    pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
        self.dispatcher.as_test().unwrap().set_block_on_ticks(range);
    }
}

/// ForegroundExecutor runs things on the main thread.
impl ForegroundExecutor {
    /// Creates a new ForegroundExecutor from the given PlatformDispatcher.
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self {
            dispatcher,
            not_send: PhantomData,
        }
    }

    /// Enqueues the given future to be run on the main thread at some point in the future.
    #[track_caller]
    pub fn spawn<F, R>(&self, future: F) -> Task<R>
    where
        F: Future<Output = R> + 'static,
        R: 'static,
    {
        let dispatcher = self.dispatcher.clone();
        let location = core::panic::Location::caller();

        #[track_caller]
        fn inner<R: 'static>(
            dispatcher: Arc<dyn PlatformDispatcher>,
            future: AnyLocalFuture<R>,
            location: &'static core::panic::Location<'static>,
        ) -> Task<R> {
            let (runnable, task) = spawn_local_with_source_location(
                future,
                move |runnable| dispatcher.dispatch_on_main_thread(runnable),
                Bigus { location },
            );
            runnable.schedule();
            Task(TaskState::Spawned(task))
        }
        inner::<R>(dispatcher, Box::pin(future), location)
    }
}

/// Variant of `async_task::spawn_local` that includes the source location of the spawn in panics.
///
/// Copy-modified from:
/// <https://github.com/smol-rs/async-task/blob/ca9dbe1db9c422fd765847fa91306e30a6bb58a9/src/runnable.rs#L405>
#[track_caller]
fn spawn_local_with_source_location<Fut, S, M>(
    future: Fut,
    schedule: S,
    metadata: M,
) -> (Runnable<M>, async_task::Task<Fut::Output, M>)
where
    Fut: Future + 'static,
    Fut::Output: 'static,
    S: async_task::Schedule<M> + Send + Sync + 'static,
    M: 'static,
{
    #[inline]
    fn thread_id() -> ThreadId {
        std::thread_local! {
            static ID: ThreadId = thread::current().id();
        }
        ID.try_with(|id| *id)
            .unwrap_or_else(|_| thread::current().id())
    }

    struct Checked<F> {
        id: ThreadId,
        inner: ManuallyDrop<F>,
        location: &'static Location<'static>,
    }

    impl<F> Drop for Checked<F> {
        fn drop(&mut self) {
            assert!(
                self.id == thread_id(),
                "local task dropped by a thread that didn't spawn it. Task spawned at {}",
                self.location
            );
            unsafe { ManuallyDrop::drop(&mut self.inner) };
        }
    }

    impl<F: Future> Future for Checked<F> {
        type Output = F::Output;

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            assert!(
                self.id == thread_id(),
                "local task polled by a thread that didn't spawn it. Task spawned at {}",
                self.location
            );
            unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) }
        }
    }

    // Wrap the future into one that checks which thread it's on.
    let future = Checked {
        id: thread_id(),
        inner: ManuallyDrop::new(future),
        location: Location::caller(),
    };

    unsafe {
        async_task::Builder::new()
            .metadata(metadata)
            .spawn_unchecked(move |_| future, schedule)
    }
}

/// Scope manages a set of tasks that are enqueued and waited on together. See [`BackgroundExecutor::scoped`].
pub struct Scope<'a> {
    executor: BackgroundExecutor,
    futures: Vec<Pin<Box<dyn Future<Output = ()> + Send + 'static>>>,
    tx: Option<mpsc::Sender<()>>,
    rx: mpsc::Receiver<()>,
    lifetime: PhantomData<&'a ()>,
}

impl<'a> Scope<'a> {
    fn new(executor: BackgroundExecutor) -> Self {
        let (tx, rx) = mpsc::channel(1);
        Self {
            executor,
            tx: Some(tx),
            rx,
            futures: Default::default(),
            lifetime: PhantomData,
        }
    }

    /// How many CPUs are available to the dispatcher.
    pub fn num_cpus(&self) -> usize {
        self.executor.num_cpus()
    }

    /// Spawn a future into this scope.
    pub fn spawn<F>(&mut self, f: F)
    where
        F: Future<Output = ()> + Send + 'a,
    {
        let tx = self.tx.clone().unwrap();

        // SAFETY: The 'a lifetime is guaranteed to outlive any of these futures because
        // dropping this `Scope` blocks until all of the futures have resolved.
        let f = unsafe {
            mem::transmute::<
                Pin<Box<dyn Future<Output = ()> + Send + 'a>>,
                Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
            >(Box::pin(async move {
                f.await;
                drop(tx);
            }))
        };
        self.futures.push(f);
    }
}

impl Drop for Scope<'_> {
    fn drop(&mut self) {
        self.tx.take().unwrap();

        // Wait until the channel is closed, which means that all of the spawned
        // futures have resolved.
        self.executor.block(self.rx.next());
    }
}