executor.rs

use crate::{AppContext, PlatformDispatcher};
use async_task::Runnable;
use futures::channel::mpsc;
use smol::prelude::*;
use std::mem::ManuallyDrop;
use std::panic::Location;
use std::thread::{self, ThreadId};
use std::{
    fmt::Debug,
    marker::PhantomData,
    mem,
    num::NonZeroUsize,
    pin::Pin,
    rc::Rc,
    sync::{
        atomic::{AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    task::{Context, Poll},
    time::{Duration, Instant},
};
use util::TryFutureExt;
use waker_fn::waker_fn;

#[cfg(any(test, feature = "test-support"))]
use rand::rngs::StdRng;

/// A pointer to the executor that is currently running,
/// for spawning background tasks.
#[derive(Clone)]
pub struct BackgroundExecutor {
    #[doc(hidden)]
    pub dispatcher: Arc<dyn PlatformDispatcher>,
}

/// A pointer to the executor that is currently running,
/// for spawning tasks on the main thread.
#[derive(Clone)]
pub struct ForegroundExecutor {
    #[doc(hidden)]
    pub dispatcher: Arc<dyn PlatformDispatcher>,
    not_send: PhantomData<Rc<()>>,
}

/// Task is a primitive that allows work to happen in the background.
///
/// It implements [`Future`] so you can `.await` on it.
///
/// If you drop a task it will be cancelled immediately. Calling [`Task::detach`] allows
/// the task to continue running, but with no way to return a value.
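///
/// A minimal sketch of the two ways a task is typically used, assuming an
/// `executor: &BackgroundExecutor` is in scope:
///
/// ```ignore
/// // Await the result (dropping `task` instead would cancel the work):
/// let task = executor.spawn(async { 2 + 2 });
/// assert_eq!(executor.block(task), 4);
///
/// // Fire-and-forget: the future keeps running, but its output is discarded.
/// executor.spawn(async { /* background work */ }).detach();
/// ```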
#[must_use]
#[derive(Debug)]
pub enum Task<T> {
    /// A task that is ready to return a value
    Ready(Option<T>),

    /// A task that is currently running.
    Spawned(async_task::Task<T>),
}

impl<T> Task<T> {
    /// Creates a new task that will resolve with the value
    pub fn ready(val: T) -> Self {
        Task::Ready(Some(val))
    }

    /// Detaching a task runs it to completion in the background
    pub fn detach(self) {
        match self {
            Task::Ready(_) => {}
            Task::Spawned(task) => task.detach(),
        }
    }
}

impl<E, T> Task<Result<T, E>>
where
    T: 'static,
    E: 'static + Debug,
{
    /// Run the task to completion in the background and log any
    /// errors that occur.
    #[track_caller]
    pub fn detach_and_log_err(self, cx: &AppContext) {
        let location = core::panic::Location::caller();
        cx.foreground_executor()
            .spawn(self.log_tracked_err(*location))
            .detach();
    }
}

impl<T> Future for Task<T> {
    type Output = T;

    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        match unsafe { self.get_unchecked_mut() } {
            Task::Ready(val) => Poll::Ready(val.take().unwrap()),
            Task::Spawned(task) => task.poll(cx),
        }
    }
}

/// A task label is an opaque identifier that you can use to
/// refer to a task in tests.
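///
/// A sketch of how a label might be used in a test to control scheduling,
/// assuming `executor: &BackgroundExecutor` is backed by the test dispatcher:
///
/// ```ignore
/// let label = TaskLabel::new();
/// // Ask the test scheduler to run this task after all other ready tasks.
/// executor.deprioritize(label);
/// executor
///     .spawn_labeled(label, async { /* low-priority work */ })
///     .detach();
/// executor.run_until_parked();
/// ```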
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TaskLabel(NonZeroUsize);

impl Default for TaskLabel {
    fn default() -> Self {
        Self::new()
    }
}

impl TaskLabel {
    /// Construct a new task label.
    pub fn new() -> Self {
        static NEXT_TASK_LABEL: AtomicUsize = AtomicUsize::new(1);
        Self(NEXT_TASK_LABEL.fetch_add(1, SeqCst).try_into().unwrap())
    }
}

type AnyLocalFuture<R> = Pin<Box<dyn 'static + Future<Output = R>>>;

type AnyFuture<R> = Pin<Box<dyn 'static + Send + Future<Output = R>>>;

/// BackgroundExecutor lets you run things on background threads.
/// In production this is a thread pool with no ordering guarantees.
/// In tests this is simulated by running tasks one by one in a deterministic
/// (but arbitrary) order controlled by the `SEED` environment variable.
impl BackgroundExecutor {
    #[doc(hidden)]
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self { dispatcher }
    }

    /// Enqueues the given future to be run to completion on a background thread.
    pub fn spawn<R>(&self, future: impl Future<Output = R> + Send + 'static) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), None)
    }

    /// Enqueues the given future to be run to completion on a background thread.
    /// The given label can be used to control the priority of the task in tests.
    pub fn spawn_labeled<R>(
        &self,
        label: TaskLabel,
        future: impl Future<Output = R> + Send + 'static,
    ) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), Some(label))
    }

    fn spawn_internal<R: Send + 'static>(
        &self,
        future: AnyFuture<R>,
        label: Option<TaskLabel>,
    ) -> Task<R> {
        let dispatcher = self.dispatcher.clone();
        let (runnable, task) =
            async_task::spawn(future, move |runnable| dispatcher.dispatch(runnable, label));
        runnable.schedule();
        Task::Spawned(task)
    }

    /// Used by the test harness to run an async test in a synchronous fashion.
    #[cfg(any(test, feature = "test-support"))]
    #[track_caller]
    pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
        if let Ok(value) = self.block_internal(false, future, None) {
            value
        } else {
            unreachable!()
        }
    }

    /// Block the current thread until the given future resolves.
    /// Consider using `block_with_timeout` instead.
    pub fn block<R>(&self, future: impl Future<Output = R>) -> R {
        if let Ok(value) = self.block_internal(true, future, None) {
            value
        } else {
            unreachable!()
        }
    }

    #[cfg(not(any(test, feature = "test-support")))]
    pub(crate) fn block_internal<R>(
        &self,
        _background_only: bool,
        future: impl Future<Output = R>,
        timeout: Option<Duration>,
    ) -> Result<R, impl Future<Output = R>> {
        use std::time::Instant;

        let mut future = Box::pin(future);
        if timeout == Some(Duration::ZERO) {
            return Err(future);
        }
        let deadline = timeout.map(|timeout| Instant::now() + timeout);

        let unparker = self.dispatcher.unparker();
        let waker = waker_fn(move || {
            unparker.unpark();
        });
        let mut cx = std::task::Context::from_waker(&waker);

        loop {
            match future.as_mut().poll(&mut cx) {
                Poll::Ready(result) => return Ok(result),
                Poll::Pending => {
                    let timeout =
                        deadline.map(|deadline| deadline.saturating_duration_since(Instant::now()));
                    if !self.dispatcher.park(timeout)
                        && deadline.is_some_and(|deadline| deadline < Instant::now())
                    {
                        return Err(future);
                    }
                }
            }
        }
    }

    #[cfg(any(test, feature = "test-support"))]
    #[track_caller]
    pub(crate) fn block_internal<R>(
        &self,
        background_only: bool,
        future: impl Future<Output = R>,
        timeout: Option<Duration>,
    ) -> Result<R, impl Future<Output = R>> {
        use std::sync::atomic::AtomicBool;

        let mut future = Box::pin(future);
        if timeout == Some(Duration::ZERO) {
            return Err(future);
        }
        let Some(dispatcher) = self.dispatcher.as_test() else {
            return Err(future);
        };

        let mut max_ticks = if timeout.is_some() {
            dispatcher.gen_block_on_ticks()
        } else {
            usize::MAX
        };
        let unparker = self.dispatcher.unparker();
        let awoken = Arc::new(AtomicBool::new(false));
        let waker = waker_fn({
            let awoken = awoken.clone();
            move || {
                awoken.store(true, SeqCst);
                unparker.unpark();
            }
        });
        let mut cx = std::task::Context::from_waker(&waker);

        loop {
            match future.as_mut().poll(&mut cx) {
                Poll::Ready(result) => return Ok(result),
                Poll::Pending => {
                    if max_ticks == 0 {
                        return Err(future);
                    }
                    max_ticks -= 1;

                    if !dispatcher.tick(background_only) {
                        if awoken.swap(false, SeqCst) {
                            continue;
                        }

                        if !dispatcher.parking_allowed() {
                            let mut backtrace_message = String::new();
                            let mut waiting_message = String::new();
                            if let Some(backtrace) = dispatcher.waiting_backtrace() {
                                backtrace_message =
                                    format!("\nbacktrace of waiting future:\n{:?}", backtrace);
                            }
                            if let Some(waiting_hint) = dispatcher.waiting_hint() {
                                waiting_message = format!("\n  waiting on: {}\n", waiting_hint);
                            }
                            panic!(
                                "parked with nothing left to run{waiting_message}{backtrace_message}",
                            )
                        }
                        self.dispatcher.park(None);
                    }
                }
            }
        }
    }

    /// Block the current thread until the given future resolves
    /// or `duration` has elapsed.
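    ///
    /// A minimal sketch of handling both outcomes, assuming
    /// `executor: &BackgroundExecutor` and a hypothetical `request` future are in scope:
    ///
    /// ```ignore
    /// match executor.block_with_timeout(Duration::from_millis(100), request) {
    ///     Ok(response) => {
    ///         // The future resolved within the timeout.
    ///     }
    ///     Err(request) => {
    ///         // Timed out; the unfinished future is handed back, so it can
    ///         // still be driven to completion later (e.g. with `block`).
    ///     }
    /// }
    /// ```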
    pub fn block_with_timeout<R>(
        &self,
        duration: Duration,
        future: impl Future<Output = R>,
    ) -> Result<R, impl Future<Output = R>> {
        self.block_internal(true, future, Some(duration))
    }

    /// Scoped lets you start a number of tasks and waits
    /// for all of them to complete before returning.
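    ///
    /// A minimal sketch of fanning work out over borrowed data from an async
    /// context, assuming `executor: &BackgroundExecutor` is in scope; the
    /// spawned futures may borrow locals because `scoped` does not return
    /// until all of them have resolved:
    ///
    /// ```ignore
    /// let mut counts = vec![0; 4];
    /// executor
    ///     .scoped(|scope| {
    ///         for count in &mut counts {
    ///             scope.spawn(async move { *count += 1 });
    ///         }
    ///     })
    ///     .await;
    /// assert_eq!(counts, vec![1, 1, 1, 1]);
    /// ```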
    pub async fn scoped<'scope, F>(&self, scheduler: F)
    where
        F: FnOnce(&mut Scope<'scope>),
    {
        let mut scope = Scope::new(self.clone());
        (scheduler)(&mut scope);
        let spawned = mem::take(&mut scope.futures)
            .into_iter()
            .map(|f| self.spawn(f))
            .collect::<Vec<_>>();
        for task in spawned {
            task.await;
        }
    }

    /// Get the current time.
    ///
    /// Calling this instead of `std::time::Instant::now` allows the use
    /// of fake timers in tests.
    pub fn now(&self) -> Instant {
        self.dispatcher.now()
    }

    /// Returns a task that will complete after the given duration.
    /// Depending on other concurrent tasks, the elapsed duration may be longer
    /// than requested.
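    ///
    /// A minimal sketch of a delayed step inside a spawned future, assuming
    /// `executor: &BackgroundExecutor` is in scope:
    ///
    /// ```ignore
    /// executor
    ///     .spawn({
    ///         let executor = executor.clone();
    ///         async move {
    ///             executor.timer(Duration::from_secs(1)).await;
    ///             // Runs roughly one second later.
    ///         }
    ///     })
    ///     .detach();
    /// ```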
    pub fn timer(&self, duration: Duration) -> Task<()> {
        if duration.is_zero() {
            return Task::ready(());
        }
        let (runnable, task) = async_task::spawn(async move {}, {
            let dispatcher = self.dispatcher.clone();
            move |runnable| dispatcher.dispatch_after(duration, runnable)
        });
        runnable.schedule();
        Task::Spawned(task)
    }

    /// in tests, start_waiting lets you indicate which task is waiting (for debugging only)
    #[cfg(any(test, feature = "test-support"))]
    pub fn start_waiting(&self) {
        self.dispatcher.as_test().unwrap().start_waiting();
    }

    /// in tests, removes the debugging data added by start_waiting
    #[cfg(any(test, feature = "test-support"))]
    pub fn finish_waiting(&self) {
        self.dispatcher.as_test().unwrap().finish_waiting();
    }

    /// in tests, run an arbitrary number of tasks (determined by the SEED environment variable)
    #[cfg(any(test, feature = "test-support"))]
    pub fn simulate_random_delay(&self) -> impl Future<Output = ()> {
        self.dispatcher.as_test().unwrap().simulate_random_delay()
    }

    /// in tests, indicate that a given task from `spawn_labeled` should run after everything else
    #[cfg(any(test, feature = "test-support"))]
    pub fn deprioritize(&self, task_label: TaskLabel) {
        self.dispatcher.as_test().unwrap().deprioritize(task_label)
    }

    /// in tests, move time forward. This does not run any tasks, but does make `timer`s ready.
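    ///
    /// A sketch of driving a timer deterministically in a test, assuming
    /// `executor: &BackgroundExecutor` is backed by the test dispatcher:
    ///
    /// ```ignore
    /// let timer = executor.timer(Duration::from_secs(60));
    /// // Move the fake clock forward; no real time passes.
    /// executor.advance_clock(Duration::from_secs(60));
    /// // The timer is now ready and resolves immediately.
    /// executor.block(timer);
    /// ```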
    #[cfg(any(test, feature = "test-support"))]
    pub fn advance_clock(&self, duration: Duration) {
        self.dispatcher.as_test().unwrap().advance_clock(duration)
    }

    /// in tests, run one task.
    #[cfg(any(test, feature = "test-support"))]
    pub fn tick(&self) -> bool {
        self.dispatcher.as_test().unwrap().tick(false)
    }

    /// in tests, run all tasks that are ready to run. If after doing so
    /// the test still has outstanding tasks, this will panic. (See also `allow_parking`)
    #[cfg(any(test, feature = "test-support"))]
    pub fn run_until_parked(&self) {
        self.dispatcher.as_test().unwrap().run_until_parked()
    }

    /// in tests, prevents `run_until_parked` from panicking if there are outstanding tasks.
    /// This is useful when you are integrating other (non-GPUI) futures, like disk access, that
    /// do take real async time to run.
    #[cfg(any(test, feature = "test-support"))]
    pub fn allow_parking(&self) {
        self.dispatcher.as_test().unwrap().allow_parking();
    }

    /// undoes the effect of [`allow_parking`].
    #[cfg(any(test, feature = "test-support"))]
    pub fn forbid_parking(&self) {
        self.dispatcher.as_test().unwrap().forbid_parking();
    }

    /// adds detail to the "parked with nothing left to run" message.
    #[cfg(any(test, feature = "test-support"))]
    pub fn set_waiting_hint(&self, msg: Option<String>) {
        self.dispatcher.as_test().unwrap().set_waiting_hint(msg);
    }

    /// in tests, returns the rng used by the dispatcher and seeded by the `SEED` environment variable
    #[cfg(any(test, feature = "test-support"))]
    pub fn rng(&self) -> StdRng {
        self.dispatcher.as_test().unwrap().rng()
    }

    /// How many CPUs are available to the dispatcher.
    pub fn num_cpus(&self) -> usize {
        #[cfg(any(test, feature = "test-support"))]
        return 4;

        #[cfg(not(any(test, feature = "test-support")))]
        return num_cpus::get();
    }

    /// Whether we're on the main thread.
    pub fn is_main_thread(&self) -> bool {
        self.dispatcher.is_main_thread()
    }

    #[cfg(any(test, feature = "test-support"))]
    /// in tests, control the number of ticks that `block_with_timeout` will run before timing out.
    pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
        self.dispatcher.as_test().unwrap().set_block_on_ticks(range);
    }
}

/// ForegroundExecutor runs things on the main thread.
impl ForegroundExecutor {
    /// Creates a new ForegroundExecutor from the given PlatformDispatcher.
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self {
            dispatcher,
            not_send: PhantomData,
        }
    }

    /// Enqueues the given future to run on the main thread at some point in the future.
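    ///
    /// A minimal sketch, assuming `foreground: &ForegroundExecutor` is in scope;
    /// unlike [`BackgroundExecutor::spawn`], the future does not need to be `Send`:
    ///
    /// ```ignore
    /// let local_state = std::rc::Rc::new(std::cell::RefCell::new(0));
    /// foreground
    ///     .spawn({
    ///         let local_state = local_state.clone();
    ///         async move { *local_state.borrow_mut() += 1 }
    ///     })
    ///     .detach();
    /// ```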
    #[track_caller]
    pub fn spawn<R>(&self, future: impl Future<Output = R> + 'static) -> Task<R>
    where
        R: 'static,
    {
        let dispatcher = self.dispatcher.clone();

        #[track_caller]
        fn inner<R: 'static>(
            dispatcher: Arc<dyn PlatformDispatcher>,
            future: AnyLocalFuture<R>,
        ) -> Task<R> {
            let (runnable, task) = spawn_local_with_source_location(future, move |runnable| {
                dispatcher.dispatch_on_main_thread(runnable)
            });
            runnable.schedule();
            Task::Spawned(task)
        }
        inner::<R>(dispatcher, Box::pin(future))
    }
}

/// Variant of `async_task::spawn_local` that includes the source location of the spawn in panics.
///
/// Copy-modified from:
/// https://github.com/smol-rs/async-task/blob/ca9dbe1db9c422fd765847fa91306e30a6bb58a9/src/runnable.rs#L405
#[track_caller]
fn spawn_local_with_source_location<Fut, S>(
    future: Fut,
    schedule: S,
) -> (Runnable<()>, async_task::Task<Fut::Output, ()>)
where
    Fut: Future + 'static,
    Fut::Output: 'static,
    S: async_task::Schedule<()> + Send + Sync + 'static,
{
    #[inline]
    fn thread_id() -> ThreadId {
        std::thread_local! {
            static ID: ThreadId = thread::current().id();
        }
        ID.try_with(|id| *id)
            .unwrap_or_else(|_| thread::current().id())
    }

    struct Checked<F> {
        id: ThreadId,
        inner: ManuallyDrop<F>,
        location: &'static Location<'static>,
    }

    impl<F> Drop for Checked<F> {
        fn drop(&mut self) {
            assert!(
                self.id == thread_id(),
                "local task dropped by a thread that didn't spawn it. Task spawned at {}",
                self.location
            );
            unsafe {
                ManuallyDrop::drop(&mut self.inner);
            }
        }
    }

    impl<F: Future> Future for Checked<F> {
        type Output = F::Output;

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            assert!(
                self.id == thread_id(),
                "local task polled by a thread that didn't spawn it. Task spawned at {}",
                self.location
            );
            unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) }
        }
    }

    // Wrap the future into one that checks which thread it's on.
    let future = Checked {
        id: thread_id(),
        inner: ManuallyDrop::new(future),
        location: Location::caller(),
    };

    unsafe { async_task::spawn_unchecked(future, schedule) }
}

/// Scope manages a set of tasks that are enqueued and waited on together. See [`BackgroundExecutor::scoped`].
pub struct Scope<'a> {
    executor: BackgroundExecutor,
    futures: Vec<Pin<Box<dyn Future<Output = ()> + Send + 'static>>>,
    tx: Option<mpsc::Sender<()>>,
    rx: mpsc::Receiver<()>,
    lifetime: PhantomData<&'a ()>,
}

impl<'a> Scope<'a> {
    fn new(executor: BackgroundExecutor) -> Self {
        let (tx, rx) = mpsc::channel(1);
        Self {
            executor,
            tx: Some(tx),
            rx,
            futures: Default::default(),
            lifetime: PhantomData,
        }
    }

    /// How many CPUs are available to the dispatcher.
    pub fn num_cpus(&self) -> usize {
        self.executor.num_cpus()
    }

    /// Spawn a future into this scope.
    pub fn spawn<F>(&mut self, f: F)
    where
        F: Future<Output = ()> + Send + 'a,
    {
        let tx = self.tx.clone().unwrap();

        // SAFETY: The 'a lifetime is guaranteed to outlive any of these futures because
        // dropping this `Scope` blocks until all of the futures have resolved.
        let f = unsafe {
            mem::transmute::<
                Pin<Box<dyn Future<Output = ()> + Send + 'a>>,
                Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
            >(Box::pin(async move {
                f.await;
                drop(tx);
            }))
        };
        self.futures.push(f);
    }
}

impl<'a> Drop for Scope<'a> {
    fn drop(&mut self) {
        self.tx.take().unwrap();

        // Wait until the channel is closed, which means that all of the spawned
        // futures have resolved.
        self.executor.block(self.rx.next());
    }
}