/// A thread-safe pool of lazily created values of type `T`.
pub struct Pool<T, F = fn() -> T>(alloc::boxed::Box<inner::Pool<T, F>>);
impl<T, F> Pool<T, F> {
/// Create a new pool. The `create` function is called whenever a fresh
/// value needs to be constructed.
pub fn new(create: F) -> Pool<T, F> {
Pool(alloc::boxed::Box::new(inner::Pool::new(create)))
}
}
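// A minimal usage sketch (illustrative only; the element type and the
// `create` closure below are assumptions, not part of this module):
//
//     let pool = Pool::new(|| Vec::<u8>::with_capacity(1024));
//     let mut buf = pool.get(); // borrow a value, creating one if needed
//     buf.push(0x2a);
//     drop(buf);                // the value is returned to the pool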
impl<T: Send, F: Fn() -> T> Pool<T, F> {
/// Get a value from the pool, creating one if necessary. The returned
/// guard dereferences to `T` and puts the value back when dropped.
#[inline]
pub fn get(&self) -> PoolGuard<'_, T, F> {
PoolGuard(self.0.get())
}
}
impl<T: core::fmt::Debug, F> core::fmt::Debug for Pool<T, F> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_tuple("Pool").field(&self.0).finish()
}
}
/// A guard representing exclusive access to a value from a `Pool`. Dropping
/// the guard returns the value to the pool.
pub struct PoolGuard<'a, T: Send, F: Fn() -> T>(inner::PoolGuard<'a, T, F>);
impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> {
/// Explicitly put this value back into the pool. Equivalent to dropping
/// the guard, but can be clearer at call sites.
#[inline]
pub fn put(this: PoolGuard<'_, T, F>) {
inner::PoolGuard::put(this.0);
}
}
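// Explicitly returning a value is equivalent to dropping the guard; a small
// sketch (the closure and element type are assumptions for illustration):
//
//     let pool = Pool::new(|| String::new());
//     let guard = pool.get();
//     PoolGuard::put(guard); // same effect as `drop(guard)`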
impl<'a, T: Send, F: Fn() -> T> core::ops::Deref for PoolGuard<'a, T, F> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
self.0.value()
}
}
impl<'a, T: Send, F: Fn() -> T> core::ops::DerefMut for PoolGuard<'a, T, F> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
self.0.value_mut()
}
}
impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug
for PoolGuard<'a, T, F>
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_tuple("PoolGuard").field(&self.0).finish()
}
}
#[cfg(feature = "std")]
mod inner {
use core::{
cell::UnsafeCell,
panic::{RefUnwindSafe, UnwindSafe},
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::{boxed::Box, vec, vec::Vec};
use std::{sync::Mutex, thread_local};
/// Allocates unique thread IDs. Starts at 3 because 0, 1 and 2 are reserved
/// as sentinel "owner" states below.
static COUNTER: AtomicUsize = AtomicUsize::new(3);
/// The pool has no owner thread yet.
const THREAD_ID_UNOWNED: usize = 0;
/// The owner's value is currently checked out via a guard.
const THREAD_ID_INUSE: usize = 1;
/// Sentinel written into a guard after its value has been put back.
const THREAD_ID_DROPPED: usize = 2;
/// The number of mutex-protected stacks used by non-owner threads.
const MAX_POOL_STACKS: usize = 8;
thread_local!(
/// A unique ID for the current thread, assigned on first use. Panics if
/// the counter ever wraps around to 0 (i.e. the ID space is exhausted).
static THREAD_ID: usize = {
let next = COUNTER.fetch_add(1, Ordering::Relaxed);
if next == 0 {
panic!("regex: thread ID allocation space exhausted");
}
next
};
);
/// Pads and aligns its contents to a cache line to reduce false sharing
/// between the per-stack mutexes below.
#[derive(Debug)]
#[repr(C, align(64))]
struct CacheLine<T>(T);
pub(super) struct Pool<T, F> {
/// The function used to create new values on demand.
create: F,
/// A fixed number of mutex-protected stacks of pooled values. A thread
/// picks a stack via `thread_id % stacks.len()` to spread contention.
stacks: Vec<CacheLine<Mutex<Vec<Box<T>>>>>,
/// The ID of the owner thread, or one of the sentinel states above.
owner: AtomicUsize,
/// The value reserved for the owner thread, accessed without locking.
owner_val: UnsafeCell<Option<T>>,
}
// SAFETY: The only unsynchronized state is `owner_val`, and the atomic
// `owner` protocol ensures at most one thread accesses it at a time.
// `T: Send` because a value may be created on one thread and used on another.
unsafe impl<T: Send, F: Send + Sync> Sync for Pool<T, F> {}
// The pool hands out exclusive access to each value, so unwind safety
// follows from the unwind safety of `T` and `F`.
impl<T: UnwindSafe, F: UnwindSafe + RefUnwindSafe> UnwindSafe for Pool<T, F> {}
impl<T: UnwindSafe, F: UnwindSafe + RefUnwindSafe> RefUnwindSafe
for Pool<T, F>
{
}
impl<T, F> Pool<T, F> {
pub(super) fn new(create: F) -> Pool<T, F> {
// Start with MAX_POOL_STACKS empty stacks, no owner and no owner value.
let mut stacks = Vec::with_capacity(MAX_POOL_STACKS);
for _ in 0..stacks.capacity() {
stacks.push(CacheLine(Mutex::new(vec![])));
}
let owner = AtomicUsize::new(THREAD_ID_UNOWNED);
let owner_val = UnsafeCell::new(None);
Pool { create, stacks, owner, owner_val }
}
}
impl<T: Send, F: Fn() -> T> Pool<T, F> {
#[inline]
pub(super) fn get(&self) -> PoolGuard<'_, T, F> {
let caller = THREAD_ID.with(|id| *id);
let owner = self.owner.load(Ordering::Acquire);
// Fast path: the calling thread owns the pool's dedicated value and can
// take it without locking. Mark it as in-use so a re-entrant call on
// this same thread falls through to the slow path instead of aliasing it.
if caller == owner {
self.owner.store(THREAD_ID_INUSE, Ordering::Release);
return self.guard_owned(caller);
}
self.get_slow(caller, owner)
}
#[cold]
fn get_slow(
&self,
caller: usize,
owner: usize,
) -> PoolGuard<'_, T, F> {
if owner == THREAD_ID_UNOWNED {
// The pool has no owner yet. Race to claim ownership for the calling
// thread; the winner creates and installs the owner value.
let res = self.owner.compare_exchange(
THREAD_ID_UNOWNED,
THREAD_ID_INUSE,
Ordering::AcqRel,
Ordering::Acquire,
);
if res.is_ok() {
// SAFETY: We won the transition from UNOWNED to INUSE, so no other
// thread can be accessing `owner_val` concurrently.
unsafe {
*self.owner_val.get() = Some((self.create)());
}
return self.guard_owned(caller);
}
}
// Otherwise, try this thread's stack exactly once. If it is contended,
// don't block: hand out a "transient" value that is dropped when the
// guard goes away instead of being put back into the pool.
let stack_id = caller % self.stacks.len();
match self.stacks[stack_id].0.try_lock() {
Ok(mut stack) => {
if let Some(value) = stack.pop() {
return self.guard_stack(value);
}
// The stack is empty. Release the lock before running the
// (possibly expensive) create function.
drop(stack);
self.guard_stack(Box::new((self.create)()))
}
Err(_) => self.guard_stack_transient(Box::new((self.create)())),
}
}
#[inline]
fn put_value(&self, value: Box<T>) {
let caller = THREAD_ID.with(|id| *id);
let stack_id = caller % self.stacks.len();
// Try to reacquire this thread's stack a bounded number of times rather
// than blocking. If the stack stays contended, the value is simply
// dropped; the pool will recreate one later if it is needed.
for _ in 0..10 {
let mut stack = match self.stacks[stack_id].0.try_lock() {
Err(_) => continue,
Ok(stack) => stack,
};
stack.push(value);
return;
}
}
/// A guard for the owner thread's dedicated value. `Err(caller)` records
/// the thread ID to restore as owner when the guard is dropped.
#[inline]
fn guard_owned(&self, caller: usize) -> PoolGuard<'_, T, F> {
PoolGuard { pool: self, value: Err(caller), discard: false }
}
/// A guard for a value that is returned to one of the stacks on drop.
#[inline]
fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T, F> {
PoolGuard { pool: self, value: Ok(value), discard: false }
}
/// A guard for a "transient" value that is dropped rather than returned
/// to the pool, used when a stack could not be locked.
#[inline]
fn guard_stack_transient(&self, value: Box<T>) -> PoolGuard<'_, T, F> {
PoolGuard { pool: self, value: Ok(value), discard: true }
}
}
impl<T: core::fmt::Debug, F> core::fmt::Debug for Pool<T, F> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("Pool")
.field("stacks", &self.stacks)
.field("owner", &self.owner)
.field("owner_val", &self.owner_val)
.finish()
}
}
pub(super) struct PoolGuard<'a, T: Send, F: Fn() -> T> {
/// The pool this guard will return its value to.
pool: &'a Pool<T, F>,
/// `Ok` holds a value destined for one of the stacks; `Err` holds the
/// owner thread's ID, meaning the value lives in `pool.owner_val`.
value: Result<Box<T>, usize>,
/// When true, the value is discarded on drop instead of being pooled.
discard: bool,
}
impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> {
#[inline]
pub(super) fn value(&self) -> &T {
match self.value {
Ok(ref v) => v,
// SAFETY: A live `Err` guard means this thread checked out the owner
// value (the owner state is INUSE), so `owner_val` is `Some` and no
// other thread can touch it until the guard is put back.
Err(id) => unsafe {
debug_assert_ne!(THREAD_ID_DROPPED, id);
(*self.pool.owner_val.get()).as_ref().unwrap_unchecked()
},
}
}
#[inline]
pub(super) fn value_mut(&mut self) -> &mut T {
match self.value {
Ok(ref mut v) => v,
// SAFETY: Same invariant as in `value` above, with `&mut self`
// additionally guaranteeing exclusive access to the guard.
Err(id) => unsafe {
debug_assert_ne!(THREAD_ID_DROPPED, id);
(*self.pool.owner_val.get()).as_mut().unwrap_unchecked()
},
}
}
/// Put the value back into the pool. Wrapping the guard in `ManuallyDrop`
/// ensures `put_imp` runs exactly once (here, not again in `Drop`).
#[inline]
pub(super) fn put(this: PoolGuard<'_, T, F>) {
let mut this = core::mem::ManuallyDrop::new(this);
this.put_imp();
}
#[inline(always)]
fn put_imp(&mut self) {
// Replace the value with the DROPPED sentinel so a double put is
// detectable by the assertions in this type.
match core::mem::replace(&mut self.value, Err(THREAD_ID_DROPPED)) {
Ok(value) => {
// Transient values are dropped instead of being pooled.
if self.discard {
return;
}
self.pool.put_value(value);
}
Err(owner) => {
// Restore the owner's thread ID so it can reuse its value on a
// subsequent `get`.
assert_ne!(THREAD_ID_DROPPED, owner);
self.pool.owner.store(owner, Ordering::Release);
}
}
}
}
impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> {
#[inline]
fn drop(&mut self) {
// Dropping a guard returns its value to the pool (or discards it if
// the guard is transient).
self.put_imp();
}
}
impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug
for PoolGuard<'a, T, F>
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct("PoolGuard")
.field("pool", &self.pool)
.field("value", &self.value)
.finish()
}
}
}
#[cfg(not(feature = "std"))]
mod inner {
use core::{
cell::UnsafeCell,
panic::{RefUnwindSafe, UnwindSafe},
sync::atomic::{AtomicBool, Ordering},
};
use alloc::{boxed::Box, vec, vec::Vec};
pub(super) struct Pool<T, F> {
/// A single stack of pooled values protected by a spin-lock mutex
/// (`std::sync::Mutex` is unavailable without `std`).
stack: Mutex<Vec<Box<T>>>,
/// The function used to create new values on demand.
create: F,
}
// The pool hands out exclusive access to each value, so reference unwind
// safety follows from the unwind safety of `T` and `F`.
impl<T: UnwindSafe, F: UnwindSafe> RefUnwindSafe for Pool<T, F> {}
impl<T, F> Pool<T, F> {
pub(super) const fn new(create: F) -> Pool<T, F> {
Pool { stack: Mutex::new(vec![]), create }
}
}
impl<T: Send, F: Fn() -> T> Pool<T, F> {
/// Get a value from the pool, popping a pooled one if available and
/// creating a fresh one otherwise.
#[inline]
pub(super) fn get(&self) -> PoolGuard<'_, T, F> {
let mut stack = self.stack.lock();
let value = match stack.pop() {
None => Box::new((self.create)()),
Some(value) => value,
};
PoolGuard { pool: self, value: Some(value) }
}
/// Put a guard's value back into the pool. Mirrors `PoolGuard::put`;
/// `ManuallyDrop` prevents the guard's `Drop` impl from running again.
#[inline]
fn put(&self, guard: PoolGuard<'_, T, F>) {
let mut guard = core::mem::ManuallyDrop::new(guard);
if let Some(value) = guard.value.take() {
self.put_value(value);
}
}
#[inline]
fn put_value(&self, value: Box<T>) {
let mut stack = self.stack.lock();
stack.push(value);
}
}
impl<T: core::fmt::Debug, F> core::fmt::Debug for Pool<T, F> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("Pool").field("stack", &self.stack).finish()
}
}
pub(super) struct PoolGuard<'a, T: Send, F: Fn() -> T> {
/// The pool this guard will return its value to on drop.
pool: &'a Pool<T, F>,
/// The pooled value; `None` only after it has been put back.
value: Option<Box<T>>,
}
impl<'a, T: Send, F: Fn() -> T> PoolGuard<'a, T, F> {
#[inline]
pub(super) fn value(&self) -> &T {
self.value.as_deref().unwrap()
}
#[inline]
pub(super) fn value_mut(&mut self) -> &mut T {
self.value.as_deref_mut().unwrap()
}
#[inline]
pub(super) fn put(this: PoolGuard<'_, T, F>) {
let mut this = core::mem::ManuallyDrop::new(this);
this.put_imp();
}
#[inline(always)]
fn put_imp(&mut self) {
if let Some(value) = self.value.take() {
self.pool.put_value(value);
}
}
}
impl<'a, T: Send, F: Fn() -> T> Drop for PoolGuard<'a, T, F> {
#[inline]
fn drop(&mut self) {
// Dropping a guard returns its value to the pool.
self.put_imp();
}
}
impl<'a, T: Send + core::fmt::Debug, F: Fn() -> T> core::fmt::Debug
for PoolGuard<'a, T, F>
{
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct("PoolGuard")
.field("pool", &self.pool)
.field("value", &self.value)
.finish()
}
}
/// A bare-bones spin-lock mutex, used because `std::sync::Mutex` is not
/// available without the `std` feature.
#[derive(Debug)]
struct Mutex<T> {
/// Whether the lock is currently held.
locked: AtomicBool,
/// The data protected by the lock.
data: UnsafeCell<T>,
}
// SAFETY: The inner data is only handed out through `lock`, which provides
// exclusive access for as long as the returned guard is alive.
unsafe impl<T: Send> Sync for Mutex<T> {}
impl<T> Mutex<T> {
const fn new(value: T) -> Mutex<T> {
Mutex {
locked: AtomicBool::new(false),
data: UnsafeCell::new(value),
}
}
/// Acquire the lock, spinning until it becomes available.
#[inline]
fn lock(&self) -> MutexGuard<'_, T> {
while self
.locked
.compare_exchange(
false,
true,
Ordering::AcqRel,
Ordering::Acquire,
)
.is_err()
{
core::hint::spin_loop();
}
// SAFETY: We won the compare-exchange above, so until the guard resets
// `locked`, this is the only live reference to `data`.
let data = unsafe { &mut *self.data.get() };
MutexGuard { locked: &self.locked, data }
}
}
#[derive(Debug)]
struct MutexGuard<'a, T> {
locked: &'a AtomicBool,
data: &'a mut T,
}
impl<'a, T> core::ops::Deref for MutexGuard<'a, T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
self.data
}
}
impl<'a, T> core::ops::DerefMut for MutexGuard<'a, T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
self.data
}
}
impl<'a, T> Drop for MutexGuard<'a, T> {
#[inline]
fn drop(&mut self) {
// Release the lock so other threads can stop spinning.
self.locked.store(false, Ordering::Release);
}
}
}
#[cfg(test)]
mod tests {
use core::panic::{RefUnwindSafe, UnwindSafe};
use alloc::{boxed::Box, vec, vec::Vec};
use super::*;
#[test]
fn oibits() {
fn assert_oibits<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {}
assert_oibits::<Pool<Vec<u32>>>();
assert_oibits::<Pool<core::cell::RefCell<Vec<u32>>>>();
assert_oibits::<
Pool<
Vec<u32>,
Box<
dyn Fn() -> Vec<u32>
+ Send
+ Sync
+ UnwindSafe
+ RefUnwindSafe,
>,
>,
>();
}
#[cfg(feature = "std")]
#[test]
fn thread_owner_optimization() {
use std::{cell::RefCell, sync::Arc, vec};
let pool: Arc<Pool<RefCell<Vec<char>>>> =
Arc::new(Pool::new(|| RefCell::new(vec!['a'])));
pool.get().borrow_mut().push('x');
let pool1 = pool.clone();
let t1 = std::thread::spawn(move || {
let guard = pool1.get();
guard.borrow_mut().push('y');
});
let pool2 = pool.clone();
let t2 = std::thread::spawn(move || {
let guard = pool2.get();
guard.borrow_mut().push('z');
});
t1.join().unwrap();
t2.join().unwrap();
assert_eq!(vec!['a', 'x'], *pool.get().borrow());
}
#[test]
fn thread_owner_distinct() {
let pool = Pool::new(|| vec!['a']);
{
let mut g1 = pool.get();
let v1 = &mut *g1;
let mut g2 = pool.get();
let v2 = &mut *g2;
v1.push('b');
v2.push('c');
assert_eq!(&mut vec!['a', 'b'], v1);
assert_eq!(&mut vec!['a', 'c'], v2);
}
assert_eq!(&mut vec!['a', 'b'], &mut *pool.get());
}
#[cfg(feature = "std")]
#[test]
fn thread_owner_sync() {
let pool = Pool::new(|| vec!['a']);
{
let mut g1 = pool.get();
let mut g2 = pool.get();
std::thread::scope(|s| {
s.spawn(|| {
g1.push('b');
});
s.spawn(|| {
g2.push('c');
});
});
let v1 = &mut *g1;
let v2 = &mut *g2;
assert_eq!(&mut vec!['a', 'b'], v1);
assert_eq!(&mut vec!['a', 'c'], v2);
}
assert_eq!(&mut vec!['a', 'b'], &mut *pool.get());
}
#[cfg(feature = "std")]
#[test]
fn thread_owner_send_drop() {
let pool = Pool::new(|| vec!['a']);
{
pool.get().push('b');
}
std::thread::scope(|s| {
let mut g = pool.get();
assert_eq!(&vec!['a', 'b'], &*g);
s.spawn(move || {
g.push('c');
})
.join()
.unwrap();
});
assert_eq!(&vec!['a', 'b', 'c'], &*pool.get());
}
}