executor.rs

  1use crate::{AppContext, PlatformDispatcher};
  2use async_task::Runnable;
  3use futures::channel::mpsc;
  4use smol::prelude::*;
  5use std::mem::ManuallyDrop;
  6use std::panic::Location;
  7use std::thread::{self, ThreadId};
  8use std::{
  9    fmt::Debug,
 10    marker::PhantomData,
 11    mem,
 12    num::NonZeroUsize,
 13    pin::Pin,
 14    rc::Rc,
 15    sync::{
 16        atomic::{AtomicUsize, Ordering::SeqCst},
 17        Arc,
 18    },
 19    task::{Context, Poll},
 20    time::{Duration, Instant},
 21};
 22use util::TryFutureExt;
 23use waker_fn::waker_fn;
 24
 25#[cfg(any(test, feature = "test-support"))]
 26use rand::rngs::StdRng;
 27
/// A pointer to the executor that is currently running,
/// for spawning background tasks.
///
/// Cloning is cheap: it only clones the `Arc` to the platform dispatcher.
#[derive(Clone)]
pub struct BackgroundExecutor {
    #[doc(hidden)]
    pub dispatcher: Arc<dyn PlatformDispatcher>,
}
 35
/// A pointer to the executor that is currently running,
/// for spawning tasks on the main thread.
#[derive(Clone)]
pub struct ForegroundExecutor {
    #[doc(hidden)]
    pub dispatcher: Arc<dyn PlatformDispatcher>,
    // `Rc` is !Send, so this marker makes the whole struct !Send,
    // keeping the executor pinned to the thread that created it.
    not_send: PhantomData<Rc<()>>,
}
 44
/// Task is a primitive that allows work to happen in the background.
///
/// It implements [`Future`] so you can `.await` on it.
///
/// If you drop a task it will be cancelled immediately. Calling [`Task::detach`] allows
/// the task to continue running, but with no way to return a value.
#[must_use]
#[derive(Debug)]
pub struct Task<T>(TaskState<T>);
 54
#[derive(Debug)]
enum TaskState<T> {
    /// A task that is ready to return a value.
    /// The `Option` is taken when the task is polled to completion.
    Ready(Option<T>),

    /// A task that is currently running on an executor.
    Spawned(async_task::Task<T>),
}
 63
impl<T> Task<T> {
    /// Creates a new task that will resolve with the value
    pub fn ready(val: T) -> Self {
        Task(TaskState::Ready(Some(val)))
    }

    /// Returns the task's result if it is already known. The only known use case for this is
    /// skipping spawning another task that awaits on this one.
    pub fn get_ready(self) -> Option<T> {
        match self {
            Task(TaskState::Ready(val)) => val,
            // A spawned task's result is unknown without awaiting it.
            Task(TaskState::Spawned(_)) => None,
        }
    }

    /// Detaching a task runs it to completion in the background
    pub fn detach(self) {
        match self {
            // A ready task has nothing left to run.
            Task(TaskState::Ready(_)) => {}
            Task(TaskState::Spawned(task)) => task.detach(),
        }
    }
}
 87
impl<E, T> Task<Result<T, E>>
where
    T: 'static,
    E: 'static + Debug,
{
    /// Run the task to completion in the background and log any
    /// errors that occur.
    ///
    /// The `#[track_caller]` location of the call site is captured so the
    /// logged error points at the caller, not at this helper.
    #[track_caller]
    pub fn detach_and_log_err(self, cx: &AppContext) {
        let location = core::panic::Location::caller();
        cx.foreground_executor()
            .spawn(self.log_tracked_err(*location))
            .detach();
    }
}
103
impl<T> Future for Task<T> {
    type Output = T;

    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        // SAFETY: the pinned data is never moved out of `self`; the `Ready`
        // arm only takes the inner `Option` value and the `Spawned` arm polls
        // the inner task in place.
        match unsafe { self.get_unchecked_mut() } {
            // Panics (on the `unwrap`) if polled again after completion.
            Task(TaskState::Ready(val)) => Poll::Ready(val.take().unwrap()),
            Task(TaskState::Spawned(task)) => task.poll(cx),
        }
    }
}
114
/// A task label is an opaque identifier that you can use to
/// refer to a task in tests.
///
/// Backed by `NonZeroUsize` so `Option<TaskLabel>` is the same size as
/// `TaskLabel` (niche optimization).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TaskLabel(NonZeroUsize);
119
impl Default for TaskLabel {
    fn default() -> Self {
        // Defers to `new`, so even defaulted labels are unique.
        Self::new()
    }
}
125
126impl TaskLabel {
127    /// Construct a new task label.
128    pub fn new() -> Self {
129        static NEXT_TASK_LABEL: AtomicUsize = AtomicUsize::new(1);
130        Self(NEXT_TASK_LABEL.fetch_add(1, SeqCst).try_into().unwrap())
131    }
132}
133
/// A boxed future with no `Send` bound, as used by the foreground (main-thread) executor.
type AnyLocalFuture<R> = Pin<Box<dyn 'static + Future<Output = R>>>;

/// A boxed `Send` future, as used by the background executor.
type AnyFuture<R> = Pin<Box<dyn 'static + Send + Future<Output = R>>>;
137
138/// BackgroundExecutor lets you run things on background threads.
139/// In production this is a thread pool with no ordering guarantees.
140/// In tests this is simulated by running tasks one by one in a deterministic
141/// (but arbitrary) order controlled by the `SEED` environment variable.
142impl BackgroundExecutor {
    /// Wrap the given platform dispatcher in a background executor.
    #[doc(hidden)]
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self { dispatcher }
    }
147
    /// Enqueues the given future to be run to completion on a background thread.
    ///
    /// The future and its output must be `Send` since they may move between
    /// the dispatcher's threads.
    pub fn spawn<R>(&self, future: impl Future<Output = R> + Send + 'static) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), None)
    }
155
    /// Enqueues the given future to be run to completion on a background thread.
    /// The given label can be used to control the priority of the task in tests
    /// (see [`BackgroundExecutor::deprioritize`]).
    pub fn spawn_labeled<R>(
        &self,
        label: TaskLabel,
        future: impl Future<Output = R> + Send + 'static,
    ) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), Some(label))
    }
168
    /// Shared implementation of `spawn` and `spawn_labeled`: wraps the boxed
    /// future in an `async_task` task whose schedule function hands the
    /// runnable to the platform dispatcher, along with the optional label.
    fn spawn_internal<R: Send + 'static>(
        &self,
        future: AnyFuture<R>,
        label: Option<TaskLabel>,
    ) -> Task<R> {
        let dispatcher = self.dispatcher.clone();
        let (runnable, task) =
            async_task::spawn(future, move |runnable| dispatcher.dispatch(runnable, label));
        // Schedule the task for its first poll immediately.
        runnable.schedule();
        Task(TaskState::Spawned(task))
    }
180
181    /// Used by the test harness to run an async test in a synchronous fashion.
182    #[cfg(any(test, feature = "test-support"))]
183    #[track_caller]
184    pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
185        if let Ok(value) = self.block_internal(false, future, None) {
186            value
187        } else {
188            unreachable!()
189        }
190    }
191
192    /// Block the current thread until the given future resolves.
193    /// Consider using `block_with_timeout` instead.
194    pub fn block<R>(&self, future: impl Future<Output = R>) -> R {
195        if let Ok(value) = self.block_internal(true, future, None) {
196            value
197        } else {
198            unreachable!()
199        }
200    }
201
202    #[cfg(not(any(test, feature = "test-support")))]
203    pub(crate) fn block_internal<R>(
204        &self,
205        _background_only: bool,
206        future: impl Future<Output = R>,
207        timeout: Option<Duration>,
208    ) -> Result<R, impl Future<Output = R>> {
209        use std::time::Instant;
210
211        let mut future = Box::pin(future);
212        if timeout == Some(Duration::ZERO) {
213            return Err(future);
214        }
215        let deadline = timeout.map(|timeout| Instant::now() + timeout);
216
217        let unparker = self.dispatcher.unparker();
218        let waker = waker_fn(move || {
219            unparker.unpark();
220        });
221        let mut cx = std::task::Context::from_waker(&waker);
222
223        loop {
224            match future.as_mut().poll(&mut cx) {
225                Poll::Ready(result) => return Ok(result),
226                Poll::Pending => {
227                    let timeout =
228                        deadline.map(|deadline| deadline.saturating_duration_since(Instant::now()));
229                    if !self.dispatcher.park(timeout)
230                        && deadline.is_some_and(|deadline| deadline < Instant::now())
231                    {
232                        return Err(future);
233                    }
234                }
235            }
236        }
237    }
238
    /// Test implementation of `block`/`block_with_timeout`: interleaves polls
    /// of the future with ticks of the deterministic test dispatcher.
    ///
    /// When `timeout` is set, a tick budget is drawn via `gen_block_on_ticks`
    /// (configurable with `set_block_on_ticks`), so timeouts are simulated
    /// deterministically rather than with real time. Returns `Err` with the
    /// pending future when the budget is exhausted, or immediately if the
    /// dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    #[track_caller]
    pub(crate) fn block_internal<R>(
        &self,
        background_only: bool,
        future: impl Future<Output = R>,
        timeout: Option<Duration>,
    ) -> Result<R, impl Future<Output = R>> {
        use std::sync::atomic::AtomicBool;

        let mut future = Box::pin(future);
        // A zero timeout means "don't block at all".
        if timeout == Some(Duration::ZERO) {
            return Err(future);
        }
        let Some(dispatcher) = self.dispatcher.as_test() else {
            return Err(future);
        };

        // Without a timeout we are willing to tick forever.
        let mut max_ticks = if timeout.is_some() {
            dispatcher.gen_block_on_ticks()
        } else {
            usize::MAX
        };
        // `awoken` records that our waker fired, so we can distinguish a real
        // wake-up from the dispatcher merely running out of queued tasks.
        let unparker = self.dispatcher.unparker();
        let awoken = Arc::new(AtomicBool::new(false));
        let waker = waker_fn({
            let awoken = awoken.clone();
            move || {
                awoken.store(true, SeqCst);
                unparker.unpark();
            }
        });
        let mut cx = std::task::Context::from_waker(&waker);

        loop {
            match future.as_mut().poll(&mut cx) {
                Poll::Ready(result) => return Ok(result),
                Poll::Pending => {
                    if max_ticks == 0 {
                        return Err(future);
                    }
                    max_ticks -= 1;

                    // `tick` runs one queued task; `false` means nothing ran.
                    if !dispatcher.tick(background_only) {
                        // Our waker fired in the meantime — re-poll the future.
                        if awoken.swap(false, SeqCst) {
                            continue;
                        }

                        // Nothing left to run and no wake-up: unless the test
                        // opted into parking (see `allow_parking`), this is a
                        // deadlock — panic with any available debugging detail.
                        if !dispatcher.parking_allowed() {
                            let mut backtrace_message = String::new();
                            let mut waiting_message = String::new();
                            if let Some(backtrace) = dispatcher.waiting_backtrace() {
                                backtrace_message =
                                    format!("\nbacktrace of waiting future:\n{:?}", backtrace);
                            }
                            if let Some(waiting_hint) = dispatcher.waiting_hint() {
                                waiting_message = format!("\n  waiting on: {}\n", waiting_hint);
                            }
                            panic!(
                                    "parked with nothing left to run{waiting_message}{backtrace_message}",
                                )
                        }
                        self.dispatcher.park(None);
                    }
                }
            }
        }
    }
307
    /// Block the current thread until the given future resolves
    /// or `duration` has elapsed.
    ///
    /// On timeout, returns `Err` containing the still-pending future so the
    /// caller can keep awaiting it if desired.
    pub fn block_with_timeout<R>(
        &self,
        duration: Duration,
        future: impl Future<Output = R>,
    ) -> Result<R, impl Future<Output = R>> {
        self.block_internal(true, future, Some(duration))
    }
317
    /// Scoped lets you start a number of tasks and waits
    /// for all of them to complete before returning.
    ///
    /// The spawned futures may borrow from the caller's stack frame; this is
    /// sound because `Scope`'s `Drop` impl blocks until every future has
    /// resolved (see `impl Drop for Scope`).
    pub async fn scoped<'scope, F>(&self, scheduler: F)
    where
        F: FnOnce(&mut Scope<'scope>),
    {
        let mut scope = Scope::new(self.clone());
        (scheduler)(&mut scope);
        // Move the collected futures out of the scope and spawn each on this
        // executor, then await them all in order.
        let spawned = mem::take(&mut scope.futures)
            .into_iter()
            .map(|f| self.spawn(f))
            .collect::<Vec<_>>();
        for task in spawned {
            task.await;
        }
    }
334
    /// Get the current time.
    ///
    /// Calling this instead of `std::time::Instant::now` allows the use
    /// of fake timers in tests (see `advance_clock`).
    pub fn now(&self) -> Instant {
        self.dispatcher.now()
    }
342
    /// Returns a task that will complete after the given duration.
    /// Depending on other concurrent tasks the elapsed duration may be longer
    /// than requested.
    pub fn timer(&self, duration: Duration) -> Task<()> {
        // Fast path: a zero-duration timer resolves without touching the dispatcher.
        if duration.is_zero() {
            return Task::ready(());
        }
        // The future itself does nothing; the delay comes from the dispatcher
        // scheduling the runnable only after `duration` has elapsed.
        let (runnable, task) = async_task::spawn(async move {}, {
            let dispatcher = self.dispatcher.clone();
            move |runnable| dispatcher.dispatch_after(duration, runnable)
        });
        runnable.schedule();
        Task(TaskState::Spawned(task))
    }
357
    /// in tests, start_waiting lets you indicate which task is waiting (for debugging only)
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn start_waiting(&self) {
        self.dispatcher.as_test().unwrap().start_waiting();
    }
363
    /// in tests, removes the debugging data added by start_waiting
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn finish_waiting(&self) {
        self.dispatcher.as_test().unwrap().finish_waiting();
    }
369
    /// in tests, run an arbitrary number of tasks (determined by the SEED environment variable)
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn simulate_random_delay(&self) -> impl Future<Output = ()> {
        self.dispatcher.as_test().unwrap().simulate_random_delay()
    }
375
    /// in tests, indicate that a given task from `spawn_labeled` should run after everything else
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn deprioritize(&self, task_label: TaskLabel) {
        self.dispatcher.as_test().unwrap().deprioritize(task_label)
    }
381
    /// in tests, move time forward. This does not run any tasks, but does make `timer`s ready.
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn advance_clock(&self, duration: Duration) {
        self.dispatcher.as_test().unwrap().advance_clock(duration)
    }
387
    /// in tests, run one task. Returns false if there was nothing to run.
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn tick(&self) -> bool {
        self.dispatcher.as_test().unwrap().tick(false)
    }
393
    /// in tests, run all tasks that are ready to run. If after doing so
    /// the test still has outstanding tasks, this will panic. (See also `allow_parking`)
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn run_until_parked(&self) {
        self.dispatcher.as_test().unwrap().run_until_parked()
    }
400
    /// in tests, prevents `run_until_parked` from panicking if there are outstanding tasks.
    /// This is useful when you are integrating other (non-GPUI) futures, like disk access, that
    /// do take real async time to run.
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn allow_parking(&self) {
        self.dispatcher.as_test().unwrap().allow_parking();
    }
408
    /// undoes the effect of [`allow_parking`].
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn forbid_parking(&self) {
        self.dispatcher.as_test().unwrap().forbid_parking();
    }
414
    /// adds detail to the "parked with nothing left to run" message.
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn set_waiting_hint(&self, msg: Option<String>) {
        self.dispatcher.as_test().unwrap().set_waiting_hint(msg);
    }
420
    /// in tests, returns the rng used by the dispatcher and seeded by the `SEED` environment variable
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn rng(&self) -> StdRng {
        self.dispatcher.as_test().unwrap().rng()
    }
426
    /// How many CPUs are available to the dispatcher.
    ///
    /// Under test configurations this is a fixed value (4), presumably to keep
    /// test behavior independent of the host machine.
    pub fn num_cpus(&self) -> usize {
        #[cfg(any(test, feature = "test-support"))]
        return 4;

        #[cfg(not(any(test, feature = "test-support")))]
        return num_cpus::get();
    }
435
    /// Whether we're on the main thread.
    ///
    /// Delegates the check to the platform dispatcher.
    pub fn is_main_thread(&self) -> bool {
        self.dispatcher.is_main_thread()
    }
440
    #[cfg(any(test, feature = "test-support"))]
    /// in tests, control the number of ticks that `block_with_timeout` will run before timing out.
    ///
    /// Panics if the dispatcher is not a test dispatcher.
    pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
        self.dispatcher.as_test().unwrap().set_block_on_ticks(range);
    }
446}
447
/// ForegroundExecutor runs things on the main thread.
impl ForegroundExecutor {
    /// Creates a new ForegroundExecutor from the given PlatformDispatcher.
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self {
            dispatcher,
            not_send: PhantomData,
        }
    }

    /// Enqueues the given Task to run on the main thread at some point in the future.
    ///
    /// The future need not be `Send`: `spawn_local_with_source_location`
    /// asserts at poll/drop time that it never leaves the spawning thread.
    #[track_caller]
    pub fn spawn<R>(&self, future: impl Future<Output = R> + 'static) -> Task<R>
    where
        R: 'static,
    {
        let dispatcher = self.dispatcher.clone();

        // The future is boxed first so this helper is generic only over `R`,
        // not over every future type, reducing monomorphization. The
        // `#[track_caller]` attribute keeps the original call site in the
        // panic messages produced by `spawn_local_with_source_location`.
        #[track_caller]
        fn inner<R: 'static>(
            dispatcher: Arc<dyn PlatformDispatcher>,
            future: AnyLocalFuture<R>,
        ) -> Task<R> {
            let (runnable, task) = spawn_local_with_source_location(future, move |runnable| {
                dispatcher.dispatch_on_main_thread(runnable)
            });
            // Schedule the task for its first poll immediately.
            runnable.schedule();
            Task(TaskState::Spawned(task))
        }
        inner::<R>(dispatcher, Box::pin(future))
    }
}
480
/// Variant of `async_task::spawn_local` that includes the source location of the spawn in panics.
///
/// Copy-modified from:
/// https://github.com/smol-rs/async-task/blob/ca9dbe1db9c422fd765847fa91306e30a6bb58a9/src/runnable.rs#L405
#[track_caller]
fn spawn_local_with_source_location<Fut, S>(
    future: Fut,
    schedule: S,
) -> (Runnable<()>, async_task::Task<Fut::Output, ()>)
where
    Fut: Future + 'static,
    Fut::Output: 'static,
    S: async_task::Schedule<()> + Send + Sync + 'static,
{
    // Cached thread id; falls back to querying the current thread if the
    // thread-local has already been torn down (e.g. during thread exit).
    #[inline]
    fn thread_id() -> ThreadId {
        std::thread_local! {
            static ID: ThreadId = thread::current().id();
        }
        ID.try_with(|id| *id)
            .unwrap_or_else(|_| thread::current().id())
    }

    // Wrapper recording the spawning thread and spawn location so polling or
    // dropping on the wrong thread produces a useful panic message.
    struct Checked<F> {
        id: ThreadId,
        inner: ManuallyDrop<F>,
        location: &'static Location<'static>,
    }

    impl<F> Drop for Checked<F> {
        fn drop(&mut self) {
            assert!(
                self.id == thread_id(),
                "local task dropped by a thread that didn't spawn it. Task spawned at {}",
                self.location
            );
            // SAFETY: `inner` is dropped exactly once, here, and only after
            // the same-thread assertion above has passed.
            unsafe {
                ManuallyDrop::drop(&mut self.inner);
            }
        }
    }

    impl<F: Future> Future for Checked<F> {
        type Output = F::Output;

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            assert!(
                self.id == thread_id(),
                "local task polled by a thread that didn't spawn it. Task spawned at {}",
                self.location
            );
            // SAFETY: structural pin projection — `inner` is never moved out
            // of the pinned wrapper; it is only dropped in place in `Drop`.
            unsafe { self.map_unchecked_mut(|c| &mut *c.inner).poll(cx) }
        }
    }

    // Wrap the future into one that checks which thread it's on.
    let future = Checked {
        id: thread_id(),
        inner: ManuallyDrop::new(future),
        location: Location::caller(),
    };

    // SAFETY: the future may be !Send; soundness relies on the `Checked`
    // wrapper asserting that every poll and the final drop happen on the
    // spawning thread. `schedule` satisfies the `Send + Sync + 'static`
    // bounds required by `spawn_unchecked`.
    unsafe { async_task::spawn_unchecked(future, schedule) }
}
545
/// Scope manages a set of tasks that are enqueued and waited on together. See [`BackgroundExecutor::scoped`].
pub struct Scope<'a> {
    executor: BackgroundExecutor,
    // Futures collected by `spawn`, transmuted to 'static (see the safety
    // comment in `Scope::spawn`).
    futures: Vec<Pin<Box<dyn Future<Output = ()> + Send + 'static>>>,
    // Every spawned future holds a clone of `tx`; once all clones drop, the
    // channel closes, which is how `Drop` knows all futures have finished.
    tx: Option<mpsc::Sender<()>>,
    rx: mpsc::Receiver<()>,
    // Ties the scope to the lifetime of the borrowed data its futures capture.
    lifetime: PhantomData<&'a ()>,
}
554
impl<'a> Scope<'a> {
    /// Create an empty scope whose tasks will run on `executor`.
    fn new(executor: BackgroundExecutor) -> Self {
        let (tx, rx) = mpsc::channel(1);
        Self {
            executor,
            tx: Some(tx),
            rx,
            futures: Default::default(),
            lifetime: PhantomData,
        }
    }

    /// How many CPUs are available to the dispatcher.
    pub fn num_cpus(&self) -> usize {
        self.executor.num_cpus()
    }

    /// Spawn a future into this scope.
    pub fn spawn<F>(&mut self, f: F)
    where
        F: Future<Output = ()> + Send + 'a,
    {
        // Each future carries a sender clone; dropping it on completion is
        // what lets `Drop` observe that all futures have resolved.
        let tx = self.tx.clone().unwrap();

        // SAFETY: The 'a lifetime is guaranteed to outlive any of these futures because
        // dropping this `Scope` blocks until all of the futures have resolved.
        let f = unsafe {
            mem::transmute::<
                Pin<Box<dyn Future<Output = ()> + Send + 'a>>,
                Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
            >(Box::pin(async move {
                f.await;
                drop(tx);
            }))
        };
        self.futures.push(f);
    }
}
593
impl<'a> Drop for Scope<'a> {
    fn drop(&mut self) {
        // Drop our own sender so the channel can close once every spawned
        // future (each holding its own sender clone) has completed.
        self.tx.take().unwrap();

        // Wait until the channel is closed, which means that all of the spawned
        // futures have resolved.
        self.executor.block(self.rx.next());
    }
}