executor.rs

use crate::{App, PlatformDispatcher, RunnableMeta, RunnableVariant};
use async_task::Runnable;
use futures::channel::mpsc;
use smol::prelude::*;
use std::{
    fmt::Debug,
    marker::PhantomData,
    mem::{self, ManuallyDrop},
    num::NonZeroUsize,
    panic::Location,
    pin::Pin,
    rc::Rc,
    sync::{
        Arc,
        atomic::{AtomicUsize, Ordering},
    },
    task::{Context, Poll},
    thread::{self, ThreadId},
    time::{Duration, Instant},
};
use util::TryFutureExt;
use waker_fn::waker_fn;

#[cfg(any(test, feature = "test-support"))]
use rand::rngs::StdRng;

/// A pointer to the executor that is currently running,
/// for spawning background tasks.
#[derive(Clone)]
pub struct BackgroundExecutor {
    #[doc(hidden)]
    pub dispatcher: Arc<dyn PlatformDispatcher>,
}

/// A pointer to the executor that is currently running,
/// for spawning tasks on the main thread.
///
/// This is intentionally `!Send` via the `not_send` marker field. This is because
/// `ForegroundExecutor::spawn` does not require `Send` but checks at runtime that the future is
/// only polled from the same thread it was spawned from. These checks would fail when spawning
/// foreground tasks from background threads.
#[derive(Clone)]
pub struct ForegroundExecutor {
    #[doc(hidden)]
    pub dispatcher: Arc<dyn PlatformDispatcher>,
    not_send: PhantomData<Rc<()>>,
}

/// Task is a primitive that allows work to happen in the background.
///
/// It implements [`Future`] so you can `.await` on it.
///
/// If you drop a task it will be cancelled immediately. Calling [`Task::detach`] allows
/// the task to continue running, but with no way to return a value.
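///
/// # Example
///
/// A minimal sketch of awaiting vs. detaching a task (assumes an `App` context
/// `cx` is in scope; the workload is illustrative only):
///
/// ```ignore
/// // Await the result of a background computation:
/// let sum = cx.background_executor().spawn(async move { 2 + 2 });
/// assert_eq!(sum.await, 4);
///
/// // Or let a task run to completion without keeping a handle to it:
/// cx.background_executor()
///     .spawn(async move { /* fire-and-forget work */ })
///     .detach();
/// ```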
#[must_use]
#[derive(Debug)]
pub struct Task<T>(TaskState<T>);

#[derive(Debug)]
enum TaskState<T> {
    /// A task that is ready to return a value
    Ready(Option<T>),

    /// A task that is currently running.
    Spawned(async_task::Task<T, RunnableMeta>),
}

impl<T> Task<T> {
    /// Creates a new task that will resolve with the value
    pub fn ready(val: T) -> Self {
        Task(TaskState::Ready(Some(val)))
    }

    /// Detaching a task runs it to completion in the background
    pub fn detach(self) {
        match self {
            Task(TaskState::Ready(_)) => {}
            Task(TaskState::Spawned(task)) => task.detach(),
        }
    }
}

impl<E, T> Task<Result<T, E>>
where
    T: 'static,
    E: 'static + Debug,
{
    /// Run the task to completion in the background and log any
    /// errors that occur.
    #[track_caller]
    pub fn detach_and_log_err(self, cx: &App) {
        let location = core::panic::Location::caller();
        cx.foreground_executor()
            .spawn(self.log_tracked_err(*location))
            .detach();
    }
}

impl<T> Future for Task<T> {
    type Output = T;

    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        match unsafe { self.get_unchecked_mut() } {
            Task(TaskState::Ready(val)) => Poll::Ready(val.take().unwrap()),
            Task(TaskState::Spawned(task)) => task.poll(cx),
        }
    }
}

/// A task label is an opaque identifier that you can use to
/// refer to a task in tests.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TaskLabel(NonZeroUsize);

impl Default for TaskLabel {
    fn default() -> Self {
        Self::new()
    }
}

impl TaskLabel {
    /// Construct a new task label.
    pub fn new() -> Self {
        static NEXT_TASK_LABEL: AtomicUsize = AtomicUsize::new(1);
        Self(
            NEXT_TASK_LABEL
                .fetch_add(1, Ordering::SeqCst)
                .try_into()
                .unwrap(),
        )
    }
}

type AnyLocalFuture<R> = Pin<Box<dyn 'static + Future<Output = R>>>;

type AnyFuture<R> = Pin<Box<dyn 'static + Send + Future<Output = R>>>;

/// BackgroundExecutor lets you run things on background threads.
/// In production this is a thread pool with no ordering guarantees.
/// In tests this is simulated by running tasks one by one in a deterministic
/// (but arbitrary) order controlled by the `SEED` environment variable.
impl BackgroundExecutor {
    #[doc(hidden)]
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self { dispatcher }
    }

    /// Enqueues the given future to be run to completion on a background thread.
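    ///
    /// A minimal sketch (assumes a `BackgroundExecutor` named `executor` is in
    /// scope; the workload is illustrative only):
    ///
    /// ```ignore
    /// let task = executor.spawn(async move {
    ///     // Any `Send + 'static` work can run here.
    ///     1 + 1
    /// });
    /// // Either `.await` the task for its result, or `task.detach()` to fire and forget.
    /// ```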
    #[track_caller]
    pub fn spawn<R>(&self, future: impl Future<Output = R> + Send + 'static) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), None)
    }

    /// Enqueues the given future to be run to completion on a background thread.
    /// The given label can be used to control the priority of the task in tests.
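    ///
    /// A minimal sketch of pairing a label with [`Self::deprioritize`] in a test
    /// (assumes an executor; the workload is illustrative):
    ///
    /// ```ignore
    /// let label = TaskLabel::new();
    /// executor.deprioritize(label); // test-only: run this task after everything else
    /// executor
    ///     .spawn_labeled(label, async move { /* background work */ })
    ///     .detach();
    /// ```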
    #[track_caller]
    pub fn spawn_labeled<R>(
        &self,
        label: TaskLabel,
        future: impl Future<Output = R> + Send + 'static,
    ) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), Some(label))
    }

    #[track_caller]
    fn spawn_internal<R: Send + 'static>(
        &self,
        future: AnyFuture<R>,
        label: Option<TaskLabel>,
    ) -> Task<R> {
        let dispatcher = self.dispatcher.clone();
        let location = core::panic::Location::caller();
        let (runnable, task) = async_task::Builder::new()
            .metadata(RunnableMeta { location })
            .spawn(
                move |_| future,
                move |runnable| dispatcher.dispatch(RunnableVariant::Meta(runnable), label),
            );
        runnable.schedule();
        Task(TaskState::Spawned(task))
    }

    /// Used by the test harness to run an async test in a synchronous fashion.
    #[cfg(any(test, feature = "test-support"))]
    #[track_caller]
    pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
        if let Ok(value) = self.block_internal(false, future, None) {
            value
        } else {
            unreachable!()
        }
    }

    /// Block the current thread until the given future resolves.
    /// Consider using `block_with_timeout` instead.
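    ///
    /// A minimal sketch (assumes an executor and a future that is known to
    /// complete; prefer [`Self::block_with_timeout`] when that is not guaranteed):
    ///
    /// ```ignore
    /// let value = executor.block(async { 40 + 2 });
    /// assert_eq!(value, 42);
    /// ```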
    pub fn block<R>(&self, future: impl Future<Output = R>) -> R {
        if let Ok(value) = self.block_internal(true, future, None) {
            value
        } else {
            unreachable!()
        }
    }

    #[cfg(not(any(test, feature = "test-support")))]
    pub(crate) fn block_internal<Fut: Future>(
        &self,
        _background_only: bool,
        future: Fut,
        timeout: Option<Duration>,
    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
        use std::time::Instant;

        let mut future = Box::pin(future);
        if timeout == Some(Duration::ZERO) {
            return Err(future);
        }
        let deadline = timeout.map(|timeout| Instant::now() + timeout);

        let parker = parking::Parker::new();
        let unparker = parker.unparker();
        let waker = waker_fn(move || {
            unparker.unpark();
        });
        let mut cx = std::task::Context::from_waker(&waker);

        loop {
            match future.as_mut().poll(&mut cx) {
                Poll::Ready(result) => return Ok(result),
                Poll::Pending => {
                    let timeout =
                        deadline.map(|deadline| deadline.saturating_duration_since(Instant::now()));
                    if let Some(timeout) = timeout {
                        if !parker.park_timeout(timeout)
                            && deadline.is_some_and(|deadline| deadline < Instant::now())
                        {
                            return Err(future);
                        }
                    } else {
                        parker.park();
                    }
                }
            }
        }
    }

    #[cfg(any(test, feature = "test-support"))]
    #[track_caller]
    pub(crate) fn block_internal<Fut: Future>(
        &self,
        background_only: bool,
        future: Fut,
        timeout: Option<Duration>,
    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
        use std::sync::atomic::AtomicBool;

        use parking::Parker;

        let mut future = Box::pin(future);
        if timeout == Some(Duration::ZERO) {
            return Err(future);
        }
        let Some(dispatcher) = self.dispatcher.as_test() else {
            return Err(future);
        };

        let mut max_ticks = if timeout.is_some() {
            dispatcher.gen_block_on_ticks()
        } else {
            usize::MAX
        };

        let parker = Parker::new();
        let unparker = parker.unparker();

        let awoken = Arc::new(AtomicBool::new(false));
        let waker = waker_fn({
            let awoken = awoken.clone();
            let unparker = unparker.clone();
            move || {
                awoken.store(true, Ordering::SeqCst);
                unparker.unpark();
            }
        });
        let mut cx = std::task::Context::from_waker(&waker);

        let duration = Duration::from_secs(
            option_env!("GPUI_TEST_TIMEOUT")
                .and_then(|s| s.parse::<u64>().ok())
                .unwrap_or(180),
        );
        let mut test_should_end_by = Instant::now() + duration;

        loop {
            match future.as_mut().poll(&mut cx) {
                Poll::Ready(result) => return Ok(result),
                Poll::Pending => {
                    if max_ticks == 0 {
                        return Err(future);
                    }
                    max_ticks -= 1;

                    if !dispatcher.tick(background_only) {
                        if awoken.swap(false, Ordering::SeqCst) {
                            continue;
                        }

                        if !dispatcher.parking_allowed() {
                            if dispatcher.advance_clock_to_next_delayed() {
                                continue;
                            }
                            let mut backtrace_message = String::new();
                            let mut waiting_message = String::new();
                            if let Some(backtrace) = dispatcher.waiting_backtrace() {
                                backtrace_message =
                                    format!("\nbacktrace of waiting future:\n{:?}", backtrace);
                            }
                            if let Some(waiting_hint) = dispatcher.waiting_hint() {
                                waiting_message = format!("\n  waiting on: {}\n", waiting_hint);
                            }
                            panic!(
                                "parked with nothing left to run{waiting_message}{backtrace_message}",
                            )
                        }
                        dispatcher.set_unparker(unparker.clone());
                        parker.park_timeout(
                            test_should_end_by.saturating_duration_since(Instant::now()),
                        );
                        if Instant::now() > test_should_end_by {
                            panic!("test timed out after {duration:?} with allow_parking")
                        }
                    }
                }
            }
        }
    }

    /// Block the current thread until the given future resolves
    /// or `duration` has elapsed.
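    ///
    /// A minimal sketch of handling both outcomes (assumes an executor; the
    /// timeout and future are illustrative):
    ///
    /// ```ignore
    /// match executor.block_with_timeout(Duration::from_millis(100), some_future) {
    ///     Ok(value) => {
    ///         // The future completed within the timeout.
    ///     }
    ///     Err(remaining) => {
    ///         // It did not; `remaining` still owns the future and can be polled later.
    ///     }
    /// }
    /// ```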
    pub fn block_with_timeout<Fut: Future>(
        &self,
        duration: Duration,
        future: Fut,
    ) -> Result<Fut::Output, impl Future<Output = Fut::Output> + use<Fut>> {
        self.block_internal(true, future, Some(duration))
    }

    /// Scoped lets you start a number of tasks and waits
    /// for all of them to complete before returning.
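    ///
    /// A minimal sketch (assumes an executor; `process` is a hypothetical helper,
    /// and the spawned futures may borrow local data for the duration of the scope):
    ///
    /// ```ignore
    /// let chunks: Vec<&[u8]> = data.chunks(1024).collect();
    /// executor
    ///     .scoped(|scope| {
    ///         for chunk in &chunks {
    ///             scope.spawn(async move { process(chunk) });
    ///         }
    ///     })
    ///     .await;
    /// ```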
    pub async fn scoped<'scope, F>(&self, scheduler: F)
    where
        F: FnOnce(&mut Scope<'scope>),
    {
        let mut scope = Scope::new(self.clone());
        (scheduler)(&mut scope);
        let spawned = mem::take(&mut scope.futures)
            .into_iter()
            .map(|f| self.spawn(f))
            .collect::<Vec<_>>();
        for task in spawned {
            task.await;
        }
    }

    /// Get the current time.
    ///
    /// Calling this instead of `std::time::Instant::now` allows the use
    /// of fake timers in tests.
    pub fn now(&self) -> Instant {
        self.dispatcher.now()
    }

    /// Returns a task that will complete after the given duration.
    /// Depending on other concurrent tasks the elapsed duration may be longer
    /// than requested.
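    ///
    /// A minimal sketch (assumes an executor; the duration is illustrative):
    ///
    /// ```ignore
    /// let timer = executor.timer(Duration::from_millis(50));
    /// executor
    ///     .spawn(async move {
    ///         timer.await;
    ///         // Runs at least 50ms later; in tests, `advance_clock` makes the timer ready.
    ///     })
    ///     .detach();
    /// ```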
    pub fn timer(&self, duration: Duration) -> Task<()> {
        if duration.is_zero() {
            return Task::ready(());
        }
        let location = core::panic::Location::caller();
        let (runnable, task) = async_task::Builder::new()
            .metadata(RunnableMeta { location })
            .spawn(move |_| async move {}, {
                let dispatcher = self.dispatcher.clone();
                move |runnable| dispatcher.dispatch_after(duration, RunnableVariant::Meta(runnable))
            });
        runnable.schedule();
        Task(TaskState::Spawned(task))
    }

    /// in tests, start_waiting lets you indicate which task is waiting (for debugging only)
    #[cfg(any(test, feature = "test-support"))]
    pub fn start_waiting(&self) {
        self.dispatcher.as_test().unwrap().start_waiting();
    }

    /// in tests, removes the debugging data added by start_waiting
    #[cfg(any(test, feature = "test-support"))]
    pub fn finish_waiting(&self) {
        self.dispatcher.as_test().unwrap().finish_waiting();
    }

    /// in tests, run an arbitrary number of tasks (determined by the SEED environment variable)
    #[cfg(any(test, feature = "test-support"))]
    pub fn simulate_random_delay(&self) -> impl Future<Output = ()> + use<> {
        self.dispatcher.as_test().unwrap().simulate_random_delay()
    }

    /// in tests, indicate that a given task from `spawn_labeled` should run after everything else
    #[cfg(any(test, feature = "test-support"))]
    pub fn deprioritize(&self, task_label: TaskLabel) {
        self.dispatcher.as_test().unwrap().deprioritize(task_label)
    }

    /// in tests, move time forward. This does not run any tasks, but does make `timer`s ready.
    #[cfg(any(test, feature = "test-support"))]
    pub fn advance_clock(&self, duration: Duration) {
        self.dispatcher.as_test().unwrap().advance_clock(duration)
    }

    /// in tests, run one task.
    #[cfg(any(test, feature = "test-support"))]
    pub fn tick(&self) -> bool {
        self.dispatcher.as_test().unwrap().tick(false)
    }

    /// in tests, run all tasks that are ready to run. If after doing so
    /// the test still has outstanding tasks, this will panic. (See also [`Self::allow_parking`])
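    ///
    /// A typical test pattern, sketched (assumes a test executor; the names are
    /// illustrative):
    ///
    /// ```ignore
    /// let task = executor.spawn(async move { compute_something() });
    /// executor.run_until_parked(); // drive all ready work to completion
    /// // At this point every task that could make progress has run.
    /// ```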
    #[cfg(any(test, feature = "test-support"))]
    pub fn run_until_parked(&self) {
        self.dispatcher.as_test().unwrap().run_until_parked()
    }

    /// in tests, prevents `run_until_parked` from panicking if there are outstanding tasks.
    /// This is useful when you are integrating other (non-GPUI) futures, like disk access, that
    /// do take real async time to run.
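    ///
    /// A minimal sketch (assumes a test with an executor; the file path is
    /// illustrative of real, non-simulated async work):
    ///
    /// ```ignore
    /// executor.allow_parking();
    /// let contents = executor.block(smol::fs::read_to_string("/tmp/example.txt"));
    /// ```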
    #[cfg(any(test, feature = "test-support"))]
    pub fn allow_parking(&self) {
        self.dispatcher.as_test().unwrap().allow_parking();
    }

    /// undoes the effect of [`Self::allow_parking`].
    #[cfg(any(test, feature = "test-support"))]
    pub fn forbid_parking(&self) {
        self.dispatcher.as_test().unwrap().forbid_parking();
    }

    /// adds detail to the "parked with nothing left to run" message.
    #[cfg(any(test, feature = "test-support"))]
    pub fn set_waiting_hint(&self, msg: Option<String>) {
        self.dispatcher.as_test().unwrap().set_waiting_hint(msg);
    }

    /// in tests, returns the rng used by the dispatcher and seeded by the `SEED` environment variable
    #[cfg(any(test, feature = "test-support"))]
    pub fn rng(&self) -> StdRng {
        self.dispatcher.as_test().unwrap().rng()
    }

    /// How many CPUs are available to the dispatcher.
    pub fn num_cpus(&self) -> usize {
        #[cfg(any(test, feature = "test-support"))]
        return 4;

        #[cfg(not(any(test, feature = "test-support")))]
        return num_cpus::get();
    }

    /// Whether we're on the main thread.
    pub fn is_main_thread(&self) -> bool {
        self.dispatcher.is_main_thread()
    }

    #[cfg(any(test, feature = "test-support"))]
    /// in tests, control the number of ticks that `block_with_timeout` will run before timing out.
    pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
        self.dispatcher.as_test().unwrap().set_block_on_ticks(range);
    }
}

/// ForegroundExecutor runs things on the main thread.
impl ForegroundExecutor {
    /// Creates a new ForegroundExecutor from the given PlatformDispatcher.
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self {
            dispatcher,
            not_send: PhantomData,
        }
    }

    /// Enqueues the given future to be run to completion on the main thread.
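    ///
    /// A minimal sketch (assumes a `ForegroundExecutor` named `foreground`; the
    /// future need not be `Send`, but it must only be polled on this thread):
    ///
    /// ```ignore
    /// let value = std::rc::Rc::new(5);
    /// foreground
    ///     .spawn(async move {
    ///         // `Rc` is fine here because the task never leaves the spawning thread.
    ///         println!("{value}");
    ///     })
    ///     .detach();
    /// ```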
    #[track_caller]
    pub fn spawn<R>(&self, future: impl Future<Output = R> + 'static) -> Task<R>
    where
        R: 'static,
    {
        let dispatcher = self.dispatcher.clone();
        let location = core::panic::Location::caller();

        #[track_caller]
        fn inner<R: 'static>(
            dispatcher: Arc<dyn PlatformDispatcher>,
            future: AnyLocalFuture<R>,
            location: &'static core::panic::Location<'static>,
        ) -> Task<R> {
            let (runnable, task) = spawn_local_with_source_location(
                future,
                move |runnable| dispatcher.dispatch_on_main_thread(RunnableVariant::Meta(runnable)),
                RunnableMeta { location },
            );
            runnable.schedule();
            Task(TaskState::Spawned(task))
        }
        inner::<R>(dispatcher, Box::pin(future), location)
    }
}

/// Variant of `async_task::spawn_local` that includes the source location of the spawn in panics.
///
/// Copy-modified from:
/// <https://github.com/smol-rs/async-task/blob/ca9dbe1db9c422fd765847fa91306e30a6bb58a9/src/runnable.rs#L405>
#[track_caller]
fn spawn_local_with_source_location<Fut, S, M>(
    future: Fut,
    schedule: S,
    metadata: M,
) -> (Runnable<M>, async_task::Task<Fut::Output, M>)
where
    Fut: Future + 'static,
    Fut::Output: 'static,
    S: async_task::Schedule<M> + Send + Sync + 'static,
    M: 'static,
{
    #[inline]
    fn thread_id() -> ThreadId {
        std::thread_local! {
            static ID: ThreadId = thread::current().id();
        }
        ID.try_with(|id| *id)
            .unwrap_or_else(|_| thread::current().id())
    }

    struct Checked<F> {
        id: ThreadId,
        inner: ManuallyDrop<F>,
        location: &'static Location<'static>,
    }

    impl<F> Drop for Checked<F> {
        fn drop(&mut self) {
            assert!(
                self.id == thread_id(),
                "local task dropped by a thread that didn't spawn it. Task spawned at {}",
                self.location
            );
            unsafe { ManuallyDrop::drop(&mut self.inner) };
        }
    }

    impl<F: Future> Future for Checked<F> {
        type Output = F::Output;

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            assert!(
                self.id == thread_id(),
                "local task polled by a thread that didn't spawn it. Task spawned at {}",
                self.location
            );
            unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) }
        }
    }

    // Wrap the future into one that checks which thread it's on.
    let future = Checked {
        id: thread_id(),
        inner: ManuallyDrop::new(future),
        location: Location::caller(),
    };

    unsafe {
        async_task::Builder::new()
            .metadata(metadata)
            .spawn_unchecked(move |_| future, schedule)
    }
}

/// Scope manages a set of tasks that are enqueued and waited on together. See [`BackgroundExecutor::scoped`].
pub struct Scope<'a> {
    executor: BackgroundExecutor,
    futures: Vec<Pin<Box<dyn Future<Output = ()> + Send + 'static>>>,
    tx: Option<mpsc::Sender<()>>,
    rx: mpsc::Receiver<()>,
    lifetime: PhantomData<&'a ()>,
}

impl<'a> Scope<'a> {
    fn new(executor: BackgroundExecutor) -> Self {
        let (tx, rx) = mpsc::channel(1);
        Self {
            executor,
            tx: Some(tx),
            rx,
            futures: Default::default(),
            lifetime: PhantomData,
        }
    }

    /// How many CPUs are available to the dispatcher.
    pub fn num_cpus(&self) -> usize {
        self.executor.num_cpus()
    }

    /// Spawn a future into this scope.
    #[track_caller]
    pub fn spawn<F>(&mut self, f: F)
    where
        F: Future<Output = ()> + Send + 'a,
    {
        let tx = self.tx.clone().unwrap();

        // SAFETY: The 'a lifetime is guaranteed to outlive any of these futures because
        // dropping this `Scope` blocks until all of the futures have resolved.
        let f = unsafe {
            mem::transmute::<
                Pin<Box<dyn Future<Output = ()> + Send + 'a>>,
                Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
            >(Box::pin(async move {
                f.await;
                drop(tx);
            }))
        };
        self.futures.push(f);
    }
}

impl Drop for Scope<'_> {
    fn drop(&mut self) {
        self.tx.take().unwrap();

        // Wait until the channel is closed, which means that all of the spawned
        // futures have resolved.
        self.executor.block(self.rx.next());
    }
}