executor.rs

use crate::{AppContext, PlatformDispatcher};
use futures::{channel::mpsc, pin_mut, FutureExt};
use smol::prelude::*;
use std::{
    fmt::Debug,
    marker::PhantomData,
    mem,
    num::NonZeroUsize,
    pin::Pin,
    rc::Rc,
    sync::{
        atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    task::{Context, Poll},
    time::Duration,
};
use util::TryFutureExt;
use waker_fn::waker_fn;

#[cfg(any(test, feature = "test-support"))]
use rand::rngs::StdRng;

/// A handle to the platform's task dispatcher, used to spawn futures that run to
/// completion on background threads.
#[derive(Clone)]
pub struct BackgroundExecutor {
    dispatcher: Arc<dyn PlatformDispatcher>,
}

/// A `!Send` counterpart to [`BackgroundExecutor`] that spawns futures to be run
/// to completion on the main thread.
#[derive(Clone)]
pub struct ForegroundExecutor {
    dispatcher: Arc<dyn PlatformDispatcher>,
    not_send: PhantomData<Rc<()>>,
}

/// A spawned future: either an immediately-ready value or a task running on an
/// executor. Dropping a `Spawned` task cancels it unless it has been detached.
#[must_use]
#[derive(Debug)]
pub enum Task<T> {
    Ready(Option<T>),
    Spawned(async_task::Task<T>),
}

impl<T> Task<T> {
    /// Creates a task that resolves immediately with the given value.
    pub fn ready(val: T) -> Self {
        Task::Ready(Some(val))
    }

    /// Detaches the task to let it keep running in the background; without this,
    /// dropping a spawned task cancels it.
    pub fn detach(self) {
        match self {
            Task::Ready(_) => {}
            Task::Spawned(task) => task.detach(),
        }
    }
}

impl<E, T> Task<Result<T, E>>
where
    T: 'static,
    E: 'static + Debug,
{
    /// Detaches the task, logging any error it resolves to along with the
    /// caller's location.
    #[track_caller]
    pub fn detach_and_log_err(self, cx: &mut AppContext) {
        let location = core::panic::Location::caller();
        cx.foreground_executor()
            .spawn(self.log_tracked_err(*location))
            .detach();
    }
}

impl<T> Future for Task<T> {
    type Output = T;

    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        match unsafe { self.get_unchecked_mut() } {
            Task::Ready(val) => Poll::Ready(val.take().unwrap()),
            Task::Spawned(task) => task.poll(cx),
        }
    }
}

/// An opaque identifier for a spawned task, used in tests to control scheduling
/// (see [`BackgroundExecutor::spawn_labeled`] and [`BackgroundExecutor::deprioritize`]).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct TaskLabel(NonZeroUsize);

impl Default for TaskLabel {
    fn default() -> Self {
        Self::new()
    }
}

impl TaskLabel {
    /// Allocates a new, unique label.
    pub fn new() -> Self {
        static NEXT_TASK_LABEL: AtomicUsize = AtomicUsize::new(1);
        Self(NEXT_TASK_LABEL.fetch_add(1, SeqCst).try_into().unwrap())
    }
}

type AnyLocalFuture<R> = Pin<Box<dyn 'static + Future<Output = R>>>;

type AnyFuture<R> = Pin<Box<dyn 'static + Send + Future<Output = R>>>;

impl BackgroundExecutor {
    /// Creates a new executor backed by the given platform dispatcher.
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self { dispatcher }
    }

    /// Enqueues the given future to be run to completion on a background thread.
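    ///
    /// A minimal usage sketch. It assumes a gpui `AppContext` named `cx` with a
    /// `background_executor()` accessor is in scope, and is marked `ignore`
    /// because it needs a running app to execute:
    ///
    /// ```ignore
    /// let task = cx.background_executor().spawn(async move {
    ///     // This future runs on a background thread.
    ///     2 + 2
    /// });
    /// let sum = cx.background_executor().block(task);
    /// assert_eq!(sum, 4);
    /// ```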
    pub fn spawn<R>(&self, future: impl Future<Output = R> + Send + 'static) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), None)
    }

    /// Enqueues the given future to be run to completion on a background thread.
    /// The given label can be used to control the priority of the task in tests.
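    ///
    /// A sketch of how a test might combine a label with
    /// [`Self::deprioritize`]; `executor` is assumed to be a `BackgroundExecutor`
    /// backed by the test dispatcher:
    ///
    /// ```ignore
    /// let label = TaskLabel::new();
    /// executor.deprioritize(label);
    /// executor
    ///     .spawn_labeled(label, async {
    ///         // Scheduled with lowered priority by the test dispatcher.
    ///     })
    ///     .detach();
    /// ```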
    pub fn spawn_labeled<R>(
        &self,
        label: TaskLabel,
        future: impl Future<Output = R> + Send + 'static,
    ) -> Task<R>
    where
        R: Send + 'static,
    {
        self.spawn_internal::<R>(Box::pin(future), Some(label))
    }

    fn spawn_internal<R: Send + 'static>(
        &self,
        future: AnyFuture<R>,
        label: Option<TaskLabel>,
    ) -> Task<R> {
        let dispatcher = self.dispatcher.clone();
        // Each time the task is woken, re-dispatch its runnable on the platform dispatcher.
        let (runnable, task) =
            async_task::spawn(future, move |runnable| dispatcher.dispatch(runnable, label));
        runnable.schedule();
        Task::Spawned(task)
    }

    /// Blocks the current thread until the given future resolves. Intended for
    /// tests; unlike [`Self::block`], it does not restrict the dispatcher to
    /// background tasks while waiting.
    #[cfg(any(test, feature = "test-support"))]
    #[track_caller]
    pub fn block_test<R>(&self, future: impl Future<Output = R>) -> R {
        if let Ok(value) = self.block_internal(false, future, usize::MAX) {
            value
        } else {
            unreachable!()
        }
    }

    /// Blocks the current thread until the given future resolves.
    pub fn block<R>(&self, future: impl Future<Output = R>) -> R {
        if let Ok(value) = self.block_internal(true, future, usize::MAX) {
            value
        } else {
            unreachable!()
        }
    }

    #[track_caller]
    pub(crate) fn block_internal<R>(
        &self,
        background_only: bool,
        future: impl Future<Output = R>,
        mut max_ticks: usize,
    ) -> Result<R, ()> {
        pin_mut!(future);
        let unparker = self.dispatcher.unparker();
        let awoken = Arc::new(AtomicBool::new(false));

        // The waker records that progress is possible and unparks the blocked thread.
        let waker = waker_fn({
            let awoken = awoken.clone();
            move || {
                awoken.store(true, SeqCst);
                unparker.unpark();
            }
        });
        let mut cx = std::task::Context::from_waker(&waker);

        loop {
            match future.as_mut().poll(&mut cx) {
                Poll::Ready(result) => return Ok(result),
                Poll::Pending => {
                    if max_ticks == 0 {
                        return Err(());
                    }
                    max_ticks -= 1;

                    // If the dispatcher had nothing to run, park unless we were woken
                    // in the meantime.
                    if !self.dispatcher.tick(background_only) {
                        if awoken.swap(false, SeqCst) {
                            continue;
                        }

                        #[cfg(any(test, feature = "test-support"))]
                        if let Some(test) = self.dispatcher.as_test() {
                            if !test.parking_allowed() {
                                let mut backtrace_message = String::new();
                                if let Some(backtrace) = test.waiting_backtrace() {
                                    backtrace_message =
                                        format!("\nbacktrace of waiting future:\n{:?}", backtrace);
                                }
                                panic!("parked with nothing left to run\n{}", backtrace_message)
                            }
                        }

                        self.dispatcher.park();
                    }
                }
            }
        }
    }

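    /// Blocks the current thread until the given future resolves or the timeout
    /// elapses, returning the still-unfinished future in the error case so the
    /// caller can continue polling it.
    ///
    /// A usage sketch, assuming a `BackgroundExecutor` named `executor` and some
    /// future `work` (marked `ignore` because it needs a platform dispatcher):
    ///
    /// ```ignore
    /// match executor.block_with_timeout(Duration::from_millis(100), work) {
    ///     Ok(value) => println!("finished in time: {value:?}"),
    ///     Err(_unfinished) => println!("timed out; the future can still be polled"),
    /// }
    /// ```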
    pub fn block_with_timeout<R>(
        &self,
        duration: Duration,
        future: impl Future<Output = R>,
    ) -> Result<R, impl Future<Output = R>> {
        let mut future = Box::pin(future.fuse());
        if duration.is_zero() {
            return Err(future);
        }

        #[cfg(any(test, feature = "test-support"))]
        let max_ticks = self
            .dispatcher
            .as_test()
            .map_or(usize::MAX, |dispatcher| dispatcher.gen_block_on_ticks());
        #[cfg(not(any(test, feature = "test-support")))]
        let max_ticks = usize::MAX;

        let mut timer = self.timer(duration).fuse();

        let timeout = async {
            futures::select_biased! {
                value = future => Ok(value),
                _ = timer => Err(()),
            }
        };
        match self.block_internal(true, timeout, max_ticks) {
            Ok(Ok(value)) => Ok(value),
            _ => Err(future),
        }
    }

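    /// Spawns the futures registered on the [`Scope`] by `scheduler` onto
    /// background threads and waits for all of them to complete. The futures may
    /// borrow from the caller's stack frame.
    ///
    /// A usage sketch, assuming a `BackgroundExecutor` named `executor` in an
    /// async context:
    ///
    /// ```ignore
    /// let mut counts = vec![0; 4];
    /// executor
    ///     .scoped(|scope| {
    ///         for count in &mut counts {
    ///             scope.spawn(async move { *count += 1 });
    ///         }
    ///     })
    ///     .await;
    /// assert_eq!(counts, vec![1; 4]);
    /// ```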
    pub async fn scoped<'scope, F>(&self, scheduler: F)
    where
        F: FnOnce(&mut Scope<'scope>),
    {
        let mut scope = Scope::new(self.clone());
        (scheduler)(&mut scope);
        let spawned = mem::take(&mut scope.futures)
            .into_iter()
            .map(|f| self.spawn(f))
            .collect::<Vec<_>>();
        for task in spawned {
            task.await;
        }
    }

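    /// Returns a task that completes after the given duration, scheduled through
    /// the platform dispatcher's delayed dispatch.
    ///
    /// A usage sketch, assuming an async context with a `BackgroundExecutor`
    /// named `executor`:
    ///
    /// ```ignore
    /// executor.timer(Duration::from_millis(50)).await;
    /// ```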
    pub fn timer(&self, duration: Duration) -> Task<()> {
        let (runnable, task) = async_task::spawn(async move {}, {
            let dispatcher = self.dispatcher.clone();
            move |runnable| dispatcher.dispatch_after(duration, runnable)
        });
        runnable.schedule();
        Task::Spawned(task)
    }

    #[cfg(any(test, feature = "test-support"))]
    pub fn start_waiting(&self) {
        self.dispatcher.as_test().unwrap().start_waiting();
    }

    #[cfg(any(test, feature = "test-support"))]
    pub fn finish_waiting(&self) {
        self.dispatcher.as_test().unwrap().finish_waiting();
    }

    #[cfg(any(test, feature = "test-support"))]
    pub fn simulate_random_delay(&self) -> impl Future<Output = ()> {
        self.dispatcher.as_test().unwrap().simulate_random_delay()
    }

    /// In tests, lowers the scheduling priority of tasks spawned with the given label.
    #[cfg(any(test, feature = "test-support"))]
    pub fn deprioritize(&self, task_label: TaskLabel) {
        self.dispatcher.as_test().unwrap().deprioritize(task_label)
    }

    /// Advances the test dispatcher's clock by the given duration.
    #[cfg(any(test, feature = "test-support"))]
    pub fn advance_clock(&self, duration: Duration) {
        self.dispatcher.as_test().unwrap().advance_clock(duration)
    }

    /// Runs one pending task on the test dispatcher, if any, returning whether a
    /// task was run.
    #[cfg(any(test, feature = "test-support"))]
    pub fn tick(&self) -> bool {
        self.dispatcher.as_test().unwrap().tick(false)
    }

    /// Runs tasks on the test dispatcher until there is nothing left to do.
    #[cfg(any(test, feature = "test-support"))]
    pub fn run_until_parked(&self) {
        self.dispatcher.as_test().unwrap().run_until_parked()
    }

    /// Allows the test dispatcher to park when there is nothing left to run,
    /// instead of panicking.
    #[cfg(any(test, feature = "test-support"))]
    pub fn allow_parking(&self) {
        self.dispatcher.as_test().unwrap().allow_parking();
    }

    /// Returns the random number generator used by the test dispatcher.
    #[cfg(any(test, feature = "test-support"))]
    pub fn rng(&self) -> StdRng {
        self.dispatcher.as_test().unwrap().rng()
    }

    /// Returns the number of logical CPUs on this machine.
    pub fn num_cpus(&self) -> usize {
        num_cpus::get()
    }

    /// Returns whether the calling thread is the main thread.
    pub fn is_main_thread(&self) -> bool {
        self.dispatcher.is_main_thread()
    }

    /// In tests, sets the range from which the test dispatcher samples the maximum
    /// number of ticks used by [`Self::block_with_timeout`].
    #[cfg(any(test, feature = "test-support"))]
    pub fn set_block_on_ticks(&self, range: std::ops::RangeInclusive<usize>) {
        self.dispatcher.as_test().unwrap().set_block_on_ticks(range);
    }
}

impl ForegroundExecutor {
    /// Creates a new executor backed by the given platform dispatcher.
    pub fn new(dispatcher: Arc<dyn PlatformDispatcher>) -> Self {
        Self {
            dispatcher,
            not_send: PhantomData,
        }
    }

    /// Enqueues the given future to be run to completion on the main thread.
    /// Unlike [`BackgroundExecutor::spawn`], the future is not required to be `Send`.
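    ///
    /// A usage sketch, assuming a gpui `AppContext` named `cx`; it is marked
    /// `ignore` because it needs a running app to execute:
    ///
    /// ```ignore
    /// cx.foreground_executor()
    ///     .spawn(async {
    ///         // A safe place to touch non-`Send`, main-thread-only state.
    ///     })
    ///     .detach();
    /// ```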
    pub fn spawn<R>(&self, future: impl Future<Output = R> + 'static) -> Task<R>
    where
        R: 'static,
    {
        let dispatcher = self.dispatcher.clone();
        // Helper that is not generic over the future type: it spawns the boxed
        // future as a local task that is always scheduled on the main thread.
        fn inner<R: 'static>(
            dispatcher: Arc<dyn PlatformDispatcher>,
            future: AnyLocalFuture<R>,
        ) -> Task<R> {
            let (runnable, task) = async_task::spawn_local(future, move |runnable| {
                dispatcher.dispatch_on_main_thread(runnable)
            });
            runnable.schedule();
            Task::Spawned(task)
        }
        inner::<R>(dispatcher, Box::pin(future))
    }
}

/// A set of futures, possibly borrowing from the caller's stack, to be run
/// concurrently on the background executor via [`BackgroundExecutor::scoped`].
pub struct Scope<'a> {
    executor: BackgroundExecutor,
    futures: Vec<Pin<Box<dyn Future<Output = ()> + Send + 'static>>>,
    tx: Option<mpsc::Sender<()>>,
    rx: mpsc::Receiver<()>,
    lifetime: PhantomData<&'a ()>,
}

impl<'a> Scope<'a> {
    fn new(executor: BackgroundExecutor) -> Self {
        let (tx, rx) = mpsc::channel(1);
        Self {
            executor,
            tx: Some(tx),
            rx,
            futures: Default::default(),
            lifetime: PhantomData,
        }
    }

    pub fn spawn<F>(&mut self, f: F)
    where
        F: Future<Output = ()> + Send + 'a,
    {
        // Each future holds a clone of the sender; the channel closes only once
        // every spawned future has completed and dropped its sender.
        let tx = self.tx.clone().unwrap();

        // Safety: The 'a lifetime is guaranteed to outlive any of these futures because
        // dropping this `Scope` blocks until all of the futures have resolved.
        let f = unsafe {
            mem::transmute::<
                Pin<Box<dyn Future<Output = ()> + Send + 'a>>,
                Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
            >(Box::pin(async move {
                f.await;
                drop(tx);
            }))
        };
        self.futures.push(f);
    }
}

impl<'a> Drop for Scope<'a> {
    fn drop(&mut self) {
        self.tx.take().unwrap();

        // Wait until the channel is closed, which means that all of the spawned
        // futures have resolved.
        self.executor.block(self.rx.next());
    }
}