#![warn(
missing_docs,
missing_debug_implementations,
rust_2018_idioms,
clippy::undocumented_unsafe_blocks
)]
#![doc(
html_favicon_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
)]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/smol-rs/smol/master/assets/images/logo_fullsize_transparent.png"
)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
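//! Async executors.
//!
//! This crate provides two executors: [`Executor`], which can be shared between threads,
//! and [`LocalExecutor`], which is single-threaded and can run futures that do not
//! implement [`Send`].
//!
//! # Examples
//!
//! A minimal usage sketch (assuming this crate is the `async_executor` crate):
//!
//! ```
//! use async_executor::Executor;
//! use futures_lite::future;
//!
//! // Create a new executor.
//! let ex = Executor::new();
//!
//! // Spawn a task.
//! let task = ex.spawn(async {
//!     println!("Hello world");
//! });
//!
//! // Run the executor until the task completes.
//! future::block_on(ex.run(task));
//! ```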
use std::fmt;
use std::marker::PhantomData;
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::rc::Rc;
use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
use std::sync::{Arc, Mutex, RwLock, TryLockError};
use std::task::{Poll, Waker};
use async_task::{Builder, Runnable};
use concurrent_queue::ConcurrentQueue;
use futures_lite::{future, prelude::*};
use slab::Slab;
#[cfg(feature = "static")]
mod static_executors;
#[doc(no_inline)]
pub use async_task::{FallibleTask, Task};
#[cfg(feature = "static")]
pub use static_executors::*;
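/// An async executor.
///
/// Spawned futures must be `Send`; in return, the executor itself is `Send` and `Sync`,
/// so it can be driven from several threads at once.
///
/// # Examples
///
/// A minimal multi-threaded sketch (assuming this crate is the `async_executor` crate;
/// the background-thread handling here is illustrative only):
///
/// ```
/// use async_executor::Executor;
/// use futures_lite::future;
/// use std::sync::Arc;
///
/// let ex = Arc::new(Executor::new());
///
/// // Drive the executor from two background threads.
/// for _ in 0..2 {
///     let ex = ex.clone();
///     std::thread::spawn(move || future::block_on(ex.run(future::pending::<()>())));
/// }
///
/// // Spawn a task and wait for its result on the current thread.
/// let task = ex.spawn(async { 1 + 2 });
/// assert_eq!(future::block_on(task), 3);
/// ```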
pub struct Executor<'a> {
state: AtomicPtr<State>,
_marker: PhantomData<std::cell::UnsafeCell<&'a ()>>,
}
// SAFETY: Executor stores no thread-local state that could be accessed from another thread.
unsafe impl Send for Executor<'_> {}
// SAFETY: Executor's operations are all internally synchronized.
unsafe impl Sync for Executor<'_> {}
impl UnwindSafe for Executor<'_> {}
impl RefUnwindSafe for Executor<'_> {}
impl fmt::Debug for Executor<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
debug_executor(self, "Executor", f)
}
}
impl<'a> Executor<'a> {
    /// Creates a new executor.
    pub const fn new() -> Executor<'a> {
Executor {
state: AtomicPtr::new(std::ptr::null_mut()),
_marker: PhantomData,
}
}
    /// Returns `true` if there are no unfinished tasks.
    pub fn is_empty(&self) -> bool {
self.state().active.lock().unwrap().is_empty()
}
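    /// Spawns a task onto the executor.
    ///
    /// # Examples
    ///
    /// A small sketch (assuming this crate is the `async_executor` crate):
    ///
    /// ```
    /// use async_executor::Executor;
    /// use futures_lite::future;
    ///
    /// let ex = Executor::new();
    /// let task = ex.spawn(async { 1 + 2 });
    /// assert_eq!(future::block_on(ex.run(task)), 3);
    /// ```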
pub fn spawn<T: Send + 'a>(&self, future: impl Future<Output = T> + Send + 'a) -> Task<T> {
        let mut active = self.state().active.lock().unwrap();

        // SAFETY: `T` and the future are both `Send`, as required by this method's bounds.
        unsafe { self.spawn_inner(future, &mut active) }
}
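    /// Spawns many tasks onto the executor, extending `handles` with their [`Task`] handles.
    ///
    /// The lock on the executor's internal task list is periodically released while
    /// iterating, so a long iterator does not starve concurrent `spawn` calls.
    ///
    /// # Examples
    ///
    /// A small sketch (assuming this crate is the `async_executor` crate):
    ///
    /// ```
    /// use async_executor::Executor;
    ///
    /// let ex = Executor::new();
    /// let mut tasks = Vec::new();
    /// ex.spawn_many((0..10).map(|i| async move { i * 2 }), &mut tasks);
    /// assert_eq!(tasks.len(), 10);
    /// ```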
pub fn spawn_many<T: Send + 'a, F: Future<Output = T> + Send + 'a>(
&self,
futures: impl IntoIterator<Item = F>,
handles: &mut impl Extend<Task<F::Output>>,
) {
        let mut active = Some(self.state().active.lock().unwrap());

        // Convert the futures into tasks.
        let tasks = futures.into_iter().enumerate().map(move |(i, future)| {
            // SAFETY: The future and its output are `Send`, as required by this method's bounds.
            let task = unsafe { self.spawn_inner(future, active.as_mut().unwrap()) };

            // Yield the lock every once in a while to ease contention.
            if i.wrapping_sub(1) % 500 == 0 {
                drop(active.take());
                active = Some(self.state().active.lock().unwrap());
            }

            task
        });

        // Push the tasks to the user's collection.
        handles.extend(tasks);
}
    /// Spawns a future onto the executor while already holding the lock on the
    /// set of active tasks.
    ///
    /// # Safety
    ///
    /// If this is an `Executor`, the future and `T` must be `Send`.
    unsafe fn spawn_inner<T: 'a>(
        &self,
        future: impl Future<Output = T> + 'a,
        active: &mut Slab<Waker>,
    ) -> Task<T> {
        // Remove the task from the set of active tasks when the future finishes.
        let entry = active.vacant_entry();
        let index = entry.key();
        let state = self.state_as_arc();
        let future = async move {
            let _guard = CallOnDrop(move || drop(state.active.lock().unwrap().try_remove(index)));
            future.await
        };

        // SAFETY: If the future is not `Send`, this must be a `LocalExecutor`, which is
        // `!Sync` and therefore only ever polled from its origin thread. The future is
        // not `'static`, but dropping the executor wakes and then drains every remaining
        // task, so no `Runnable` outlives `'a`.
        let (runnable, task) = Builder::new()
            .propagate_panic(true)
            .spawn_unchecked(|()| future, self.schedule());
        entry.insert(runnable.waker());

        runnable.schedule();
        task
}
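    /// Attempts to run a task if at least one is scheduled.
    ///
    /// Running a scheduled task means simply polling its future once.
    ///
    /// # Examples
    ///
    /// A small sketch (assuming this crate is the `async_executor` crate):
    ///
    /// ```
    /// use async_executor::Executor;
    ///
    /// let ex = Executor::new();
    /// assert!(!ex.try_tick()); // no tasks scheduled yet
    ///
    /// let task = ex.spawn(async { println!("Hello world") });
    /// assert!(ex.try_tick()); // polls the spawned task
    /// ```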
pub fn try_tick(&self) -> bool {
self.state().try_tick()
}
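    /// Runs a single task asynchronously.
    ///
    /// Unlike [`Executor::try_tick()`], this method waits until a task becomes available.
    ///
    /// # Examples
    ///
    /// A small sketch (assuming this crate is the `async_executor` crate):
    ///
    /// ```
    /// use async_executor::Executor;
    /// use futures_lite::future;
    ///
    /// let ex = Executor::new();
    /// let task = ex.spawn(async { println!("Hello world") });
    /// future::block_on(ex.tick()); // runs the task
    /// ```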
pub async fn tick(&self) {
self.state().tick().await;
}
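    /// Runs the executor until the given future completes.
    ///
    /// # Examples
    ///
    /// A small sketch (assuming this crate is the `async_executor` crate):
    ///
    /// ```
    /// use async_executor::Executor;
    /// use futures_lite::future;
    ///
    /// let ex = Executor::new();
    /// let task = ex.spawn(async { 1 + 2 });
    /// let res = future::block_on(ex.run(async { task.await * 2 }));
    /// assert_eq!(res, 6);
    /// ```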
pub async fn run<T>(&self, future: impl Future<Output = T>) -> T {
self.state().run(future).await
}
    /// Returns a function that schedules a runnable task when it gets woken up.
    fn schedule(&self) -> impl Fn(Runnable) + Send + Sync + 'static {
let state = self.state_as_arc();
move |runnable| {
state.queue.push(runnable).unwrap();
state.notify();
}
}
#[inline]
    /// Returns a pointer to the inner state, allocating it on first use.
    fn state_ptr(&self) -> *const State {
#[cold]
fn alloc_state(atomic_ptr: &AtomicPtr<State>) -> *mut State {
let state = Arc::new(State::new());
let ptr = Arc::into_raw(state) as *mut State;
if let Err(actual) = atomic_ptr.compare_exchange(
std::ptr::null_mut(),
ptr,
Ordering::AcqRel,
Ordering::Acquire,
) {
                // Another thread won the race; free our allocation and use theirs.
                // SAFETY: `ptr` came from `Arc::into_raw` above and was never shared.
                drop(unsafe { Arc::from_raw(ptr) });
                actual
} else {
ptr
}
}
let mut ptr = self.state.load(Ordering::Acquire);
if ptr.is_null() {
ptr = alloc_state(&self.state);
}
ptr
}
#[inline]
    /// Returns a reference to the inner state.
    fn state(&self) -> &State {
        // SAFETY: So long as an `Executor` lives, its state pointer is a valid `Arc`'d `State`.
        unsafe { &*self.state_ptr() }
}
#[inline]
    /// Returns the inner state as an `Arc`, bumping the reference count.
    fn state_as_arc(&self) -> Arc<State> {
        // SAFETY: So long as an `Executor` lives, its state pointer is a valid `Arc`'d `State`.
        let arc = unsafe { Arc::from_raw(self.state_ptr()) };
let clone = arc.clone();
std::mem::forget(arc);
clone
}
}
impl Drop for Executor<'_> {
fn drop(&mut self) {
let ptr = *self.state.get_mut();
if ptr.is_null() {
return;
}
        // SAFETY: As the pointer is not null, it was produced by `Arc::into_raw` and can
        // be reclaimed here.
        let state = unsafe { Arc::from_raw(ptr) };

        // Wake all remaining active tasks so their owners drop them.
        let mut active = state.active.lock().unwrap_or_else(|e| e.into_inner());
        for w in active.drain() {
            w.wake();
        }
        drop(active);

        // Drain the queue so that no `Runnable` outlives the executor.
        while state.queue.pop().is_ok() {}
}
}
impl<'a> Default for Executor<'a> {
fn default() -> Executor<'a> {
Executor::new()
}
}
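/// A thread-local executor.
///
/// The futures it spawns do not have to implement [`Send`]; in return, the executor
/// itself cannot be sent to or shared with another thread.
///
/// # Examples
///
/// A small sketch (assuming this crate is the `async_executor` crate):
///
/// ```
/// use async_executor::LocalExecutor;
/// use futures_lite::future;
///
/// let local_ex = LocalExecutor::new();
///
/// future::block_on(local_ex.run(async {
///     println!("Hello world!");
/// }));
/// ```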
pub struct LocalExecutor<'a> {
inner: Executor<'a>,
_marker: PhantomData<Rc<()>>,
}
impl UnwindSafe for LocalExecutor<'_> {}
impl RefUnwindSafe for LocalExecutor<'_> {}
impl fmt::Debug for LocalExecutor<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
debug_executor(&self.inner, "LocalExecutor", f)
}
}
impl<'a> LocalExecutor<'a> {
    /// Creates a single-threaded executor.
    pub const fn new() -> LocalExecutor<'a> {
LocalExecutor {
inner: Executor::new(),
_marker: PhantomData,
}
}
    /// Returns `true` if there are no unfinished tasks.
    pub fn is_empty(&self) -> bool {
self.inner().is_empty()
}
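    /// Spawns a task onto the executor.
    ///
    /// The future does not have to be `Send`.
    ///
    /// # Examples
    ///
    /// A small sketch (assuming this crate is the `async_executor` crate):
    ///
    /// ```
    /// use async_executor::LocalExecutor;
    /// use futures_lite::future;
    ///
    /// let local_ex = LocalExecutor::new();
    /// let task = local_ex.spawn(async { 1 + 2 });
    /// assert_eq!(future::block_on(local_ex.run(task)), 3);
    /// ```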
pub fn spawn<T: 'a>(&self, future: impl Future<Output = T> + 'a) -> Task<T> {
        let mut active = self.inner().state().active.lock().unwrap();

        // SAFETY: Neither `T` nor the future need to be `Send`: `LocalExecutor` is
        // `!Send` and `!Sync`, so both spawning and polling stay on the current thread.
        unsafe { self.inner().spawn_inner(future, &mut active) }
}
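    /// Spawns many tasks onto the executor, extending `handles` with their [`Task`] handles.
    ///
    /// # Examples
    ///
    /// A small sketch (assuming this crate is the `async_executor` crate):
    ///
    /// ```
    /// use async_executor::LocalExecutor;
    ///
    /// let local_ex = LocalExecutor::new();
    /// let mut tasks = Vec::new();
    /// local_ex.spawn_many((0..10).map(|i| async move { i * 2 }), &mut tasks);
    /// assert_eq!(tasks.len(), 10);
    /// ```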
    pub fn spawn_many<T: 'a, F: Future<Output = T> + 'a>(
        &self,
        futures: impl IntoIterator<Item = F>,
        handles: &mut impl Extend<Task<F::Output>>,
    ) {
        let mut active = self.inner().state().active.lock().unwrap();

        // Convert the futures into tasks. Since only one thread can spawn on a
        // `LocalExecutor` at a time, there is no need to periodically release the
        // lock as `Executor::spawn_many` does.
        let tasks = futures.into_iter().map(|future| {
            // SAFETY: Neither `T` nor the future need to be `Send`, because the
            // `LocalExecutor` never leaves its origin thread.
            unsafe { self.inner().spawn_inner(future, &mut active) }
        });

        // Push the tasks to the user's collection.
        handles.extend(tasks);
}
    /// Attempts to run a task if at least one is scheduled.
    pub fn try_tick(&self) -> bool {
self.inner().try_tick()
}
    /// Runs a single task asynchronously, waiting until one is scheduled.
    pub async fn tick(&self) {
self.inner().tick().await
}
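    /// Runs the executor until the given future completes.
    ///
    /// # Examples
    ///
    /// A small sketch (assuming this crate is the `async_executor` crate):
    ///
    /// ```
    /// use async_executor::LocalExecutor;
    /// use futures_lite::future;
    ///
    /// let local_ex = LocalExecutor::new();
    /// let task = local_ex.spawn(async { 1 + 2 });
    /// let res = future::block_on(local_ex.run(async { task.await * 2 }));
    /// assert_eq!(res, 6);
    /// ```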
pub async fn run<T>(&self, future: impl Future<Output = T>) -> T {
self.inner().run(future).await
}
    /// Returns a reference to the inner executor.
    fn inner(&self) -> &Executor<'a> {
&self.inner
}
}
impl<'a> Default for LocalExecutor<'a> {
fn default() -> LocalExecutor<'a> {
LocalExecutor::new()
}
}
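/// The state of a single executor, shared by all of its tickers and runners.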
struct State {
    /// The global queue.
    queue: ConcurrentQueue<Runnable>,
    /// Local queues created by runners.
    local_queues: RwLock<Vec<Arc<ConcurrentQueue<Runnable>>>>,
    /// Set to `true` when a sleeping ticker is notified or no tickers are sleeping.
    notified: AtomicBool,
    /// A list of sleeping tickers.
    sleepers: Mutex<Sleepers>,
    /// Currently active tasks.
    active: Mutex<Slab<Waker>>,
}
impl State {
const fn new() -> State {
State {
queue: ConcurrentQueue::unbounded(),
local_queues: RwLock::new(Vec::new()),
notified: AtomicBool::new(true),
sleepers: Mutex::new(Sleepers {
count: 0,
wakers: Vec::new(),
free_ids: Vec::new(),
}),
active: Mutex::new(Slab::new()),
}
}
#[inline]
    /// Notifies a single sleeping ticker, unless one has already been notified.
    fn notify(&self) {
if self
.notified
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
let waker = self.sleepers.lock().unwrap().notify();
if let Some(w) = waker {
w.wake();
}
}
}
    /// Attempts to run a task if at least one is scheduled.
    pub(crate) fn try_tick(&self) -> bool {
        match self.queue.pop() {
            Err(_) => false,
            Ok(runnable) => {
                // Notify another ticker to pick up where this one leaves off, in case
                // running the task takes a long time.
                self.notify();

                // Run the task.
                runnable.run();
true
}
}
}
    /// Runs a single task asynchronously, waiting until one is scheduled.
    pub(crate) async fn tick(&self) {
let runnable = Ticker::new(self).runnable().await;
runnable.run();
}
    /// Runs the executor until the given future completes.
    pub async fn run<T>(&self, future: impl Future<Output = T>) -> T {
        let mut runner = Runner::new(self);
        let mut rng = fastrand::Rng::new();

        // A future that runs tasks forever, yielding to other futures every 200 tasks.
        let run_forever = async {
loop {
for _ in 0..200 {
let runnable = runner.runnable(&mut rng).await;
runnable.run();
}
future::yield_now().await;
}
};
        // Run `future` until it completes, driving tasks in the meantime.
        future.or(run_forever).await
}
}
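/// A list of sleeping tickers.
///
/// `count` tracks all sleeping tickers, while `wakers` holds the IDs and wakers of the
/// sleeping tickers that have not yet been notified; a ticker counts as notified once
/// its entry is removed from `wakers`. `free_ids` holds reclaimed ticker IDs for reuse.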
struct Sleepers {
count: usize,
wakers: Vec<(usize, Waker)>,
free_ids: Vec<usize>,
}
impl Sleepers {
    /// Inserts a new sleeping ticker and returns its ID (IDs start at 1; 0 means "awake").
    fn insert(&mut self, waker: &Waker) -> usize {
let id = match self.free_ids.pop() {
Some(id) => id,
None => self.count + 1,
};
self.count += 1;
self.wakers.push((id, waker.clone()));
id
}
    /// Re-inserts a sleeping ticker's waker if it was notified.
    ///
    /// Returns `true` if the ticker was notified (i.e. its waker was missing from the list).
    fn update(&mut self, id: usize, waker: &Waker) -> bool {
for item in &mut self.wakers {
if item.0 == id {
item.1.clone_from(waker);
return false;
}
}
self.wakers.push((id, waker.clone()));
true
}
    /// Removes a previously inserted sleeping ticker.
    ///
    /// Returns `true` if the ticker was notified.
    fn remove(&mut self, id: usize) -> bool {
self.count -= 1;
self.free_ids.push(id);
for i in (0..self.wakers.len()).rev() {
if self.wakers[i].0 == id {
self.wakers.remove(i);
return false;
}
}
true
}
    /// Returns `true` if a sleeping ticker is notified or no tickers are sleeping.
    fn is_notified(&self) -> bool {
self.count == 0 || self.count > self.wakers.len()
}
    /// Returns the waker of a sleeping ticker to notify, if any.
    ///
    /// If a ticker was already notified, or if no tickers are sleeping, `None` is returned.
    fn notify(&mut self) -> Option<Waker> {
if self.wakers.len() == self.count {
self.wakers.pop().map(|item| item.1)
} else {
None
}
}
}
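/// Runs tasks one by one.
///
/// `sleeping` is 0 while the ticker is awake, or the ticker's sleeper ID (starting at 1)
/// while it is waiting for a task.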
struct Ticker<'a> {
state: &'a State,
sleeping: usize,
}
impl Ticker<'_> {
    /// Creates a ticker for the given state.
    fn new(state: &State) -> Ticker<'_> {
Ticker { state, sleeping: 0 }
}
    /// Moves the ticker into the sleeping, unnotified state.
    ///
    /// Returns `false` if the ticker was already sleeping and unnotified.
    fn sleep(&mut self, waker: &Waker) -> bool {
let mut sleepers = self.state.sleepers.lock().unwrap();
match self.sleeping {
0 => {
self.sleeping = sleepers.insert(waker);
}
id => {
if !sleepers.update(id, waker) {
return false;
}
}
}
self.state
.notified
.store(sleepers.is_notified(), Ordering::Release);
true
}
    /// Moves the ticker into the woken state.
    fn wake(&mut self) {
if self.sleeping != 0 {
let mut sleepers = self.state.sleepers.lock().unwrap();
sleepers.remove(self.sleeping);
self.state
.notified
.store(sleepers.is_notified(), Ordering::Release);
}
self.sleeping = 0;
}
    /// Waits for the next runnable task to run.
    async fn runnable(&mut self) -> Runnable {
self.runnable_with(|| self.state.queue.pop().ok()).await
}
    /// Waits for the next runnable task, using `search` to look for one.
    async fn runnable_with(&mut self, mut search: impl FnMut() -> Option<Runnable>) -> Runnable {
future::poll_fn(|cx| {
loop {
match search() {
                    None => {
                        // Move to the sleeping, unnotified state.
                        if !self.sleep(cx.waker()) {
                            // If already sleeping and unnotified, wait to be woken.
                            return Poll::Pending;
                        }
                    }
                    Some(r) => {
                        // Wake up.
                        self.wake();

                        // Notify another ticker to pick up where this one leaves off, in
                        // case running the task takes a long time.
                        self.state.notify();

                        return Poll::Ready(r);
}
}
}
})
.await
}
}
impl Drop for Ticker<'_> {
fn drop(&mut self) {
        // If this ticker is sleeping, remove it from the sleepers list.
        if self.sleeping != 0 {
            let mut sleepers = self.state.sleepers.lock().unwrap();
            let notified = sleepers.remove(self.sleeping);

            self.state
                .notified
                .store(sleepers.is_notified(), Ordering::Release);

            // If this ticker was notified, then notify another ticker so that the
            // notification is not lost.
            if notified {
                drop(sleepers);
                self.state.notify();
            }
}
}
}
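/// A worker in a work-stealing executor.
///
/// This is just a ticker that also has an associated local queue.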
struct Runner<'a> {
state: &'a State,
ticker: Ticker<'a>,
local: Arc<ConcurrentQueue<Runnable>>,
ticks: usize,
}
impl Runner<'_> {
    /// Creates a runner and registers it in the executor state.
    fn new(state: &State) -> Runner<'_> {
        let runner = Runner {
            state,
            ticker: Ticker::new(state),
            local: Arc::new(ConcurrentQueue::bounded(512)),
            ticks: 0,
        };

        // Register the local queue so other runners can steal from it.
        state
            .local_queues
            .write()
            .unwrap()
            .push(runner.local.clone());

        runner
}
    /// Waits for the next runnable task to run.
    async fn runnable(&mut self, rng: &mut fastrand::Rng) -> Runnable {
        let runnable = self
            .ticker
            .runnable_with(|| {
                // Try the local queue.
                if let Ok(r) = self.local.pop() {
                    return Some(r);
                }

                // Try stealing from the global queue.
                if let Ok(r) = self.state.queue.pop() {
                    steal(&self.state.queue, &self.local);
return Some(r);
}
                // Try stealing from other runners' local queues.
                let local_queues = self.state.local_queues.read().unwrap();

                // Pick a random starting point in the list of queues.
                let n = local_queues.len();
                let start = rng.usize(..n);
                let iter = local_queues
                    .iter()
                    .chain(local_queues.iter())
                    .skip(start)
                    .take(n);

                // Remove this runner's own local queue.
                let iter = iter.filter(|local| !Arc::ptr_eq(local, &self.local));
for local in iter {
steal(local, &self.local);
if let Ok(r) = self.local.pop() {
return Some(r);
}
}
None
})
.await;
        // Bump the tick counter.
        self.ticks = self.ticks.wrapping_add(1);

        if self.ticks % 64 == 0 {
            // Occasionally steal from the global queue to ensure fair scheduling.
            steal(&self.state.queue, &self.local);
        }
runnable
}
}
impl Drop for Runner<'_> {
fn drop(&mut self) {
        // Remove the local queue from the executor state.
        self.state
            .local_queues
            .write()
            .unwrap()
            .retain(|local| !Arc::ptr_eq(local, &self.local));

        // Re-schedule remaining tasks in the local queue.
        while let Ok(r) = self.local.pop() {
            r.schedule();
        }
}
}
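/// Steals some items from one queue into another.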
fn steal<T>(src: &ConcurrentQueue<T>, dest: &ConcurrentQueue<T>) {
    // Half of `src`'s length, rounded up.
    let mut count = (src.len() + 1) / 2;

    if count > 0 {
        // Don't steal more than fits into the destination queue.
        if let Some(cap) = dest.capacity() {
            count = count.min(cap - dest.len());
        }

        // Steal tasks one by one, stopping early if `src` runs dry.
        for _ in 0..count {
if let Ok(t) = src.pop() {
assert!(dest.push(t).is_ok());
} else {
break;
}
}
}
}
/// Debug implementation for `Executor` and `LocalExecutor`.
fn debug_executor(executor: &Executor<'_>, name: &str, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // Get a reference to the state.
    let ptr = executor.state.load(Ordering::Acquire);
    if ptr.is_null() {
        // The executor state has not been initialized yet.
struct Uninitialized;
impl fmt::Debug for Uninitialized {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<uninitialized>")
}
}
return f.debug_tuple(name).field(&Uninitialized).finish();
}
    // SAFETY: A non-null state pointer always points to a valid `Arc`'d `State`.
    let state = unsafe { &*ptr };
debug_state(state, name, f)
}
/// Debug implementation for an executor with an initialized state.
fn debug_state(state: &State, name: &str, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct ActiveTasks<'a>(&'a Mutex<Slab<Waker>>);
impl fmt::Debug for ActiveTasks<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0.try_lock() {
Ok(lock) => fmt::Debug::fmt(&lock.len(), f),
Err(TryLockError::WouldBlock) => f.write_str("<locked>"),
Err(TryLockError::Poisoned(_)) => f.write_str("<poisoned>"),
}
}
}
struct LocalRunners<'a>(&'a RwLock<Vec<Arc<ConcurrentQueue<Runnable>>>>);
impl fmt::Debug for LocalRunners<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0.try_read() {
Ok(lock) => f
.debug_list()
.entries(lock.iter().map(|queue| queue.len()))
.finish(),
Err(TryLockError::WouldBlock) => f.write_str("<locked>"),
Err(TryLockError::Poisoned(_)) => f.write_str("<poisoned>"),
}
}
}
struct SleepCount<'a>(&'a Mutex<Sleepers>);
impl fmt::Debug for SleepCount<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0.try_lock() {
Ok(lock) => fmt::Debug::fmt(&lock.count, f),
Err(TryLockError::WouldBlock) => f.write_str("<locked>"),
Err(TryLockError::Poisoned(_)) => f.write_str("<poisoned>"),
}
}
}
f.debug_struct(name)
.field("active", &ActiveTasks(&state.active))
.field("global_tasks", &state.queue.len())
.field("local_runners", &LocalRunners(&state.local_queues))
.field("sleepers", &SleepCount(&state.sleepers))
.finish()
}
/// Runs a closure when dropped.
struct CallOnDrop<F: FnMut()>(F);
impl<F: FnMut()> Drop for CallOnDrop<F> {
fn drop(&mut self) {
(self.0)();
}
}
/// Compile-time check that the executor, its futures, and its schedule function are
/// `Send`, `Sync`, and (for the schedule function) `'static`.
fn _ensure_send_and_sync() {
use futures_lite::future::pending;
fn is_send<T: Send>(_: T) {}
fn is_sync<T: Sync>(_: T) {}
fn is_static<T: 'static>(_: T) {}
is_send::<Executor<'_>>(Executor::new());
is_sync::<Executor<'_>>(Executor::new());
let ex = Executor::new();
is_send(ex.run(pending::<()>()));
is_sync(ex.run(pending::<()>()));
is_send(ex.tick());
is_sync(ex.tick());
is_send(ex.schedule());
is_sync(ex.schedule());
is_static(ex.schedule());
fn _negative_test() {}
}