#![deny(unsafe_code)]
use std::fmt;
#[cfg(not(feature = "loom"))]
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::{self, Acquire, Relaxed, Release};
#[cfg(feature = "loom")]
use loom::sync::atomic::AtomicUsize;
use crate::opcode::Opcode;
use crate::sync_primitive::SyncPrimitive;
use crate::wait_queue::WaitQueue;
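/// [`Lock`] is a reader-writer lock whose entire state fits in a single word.
///
/// A sketch of the state layout, as interpreted by this module: the
/// [`WaitQueue::DATA_MASK`] bits hold the ownership state (all bits set means
/// exclusive ownership, any smaller non-zero value is the number of shared
/// owners), while the remaining bits carry the [`WaitQueue`] tail address and
/// its processing flag.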
#[derive(Default)]
pub struct Lock {
state: AtomicUsize,
}
impl Lock {
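    /// The maximum number of shared owners.
    ///
    /// The all-bits-set value of [`WaitQueue::DATA_MASK`] is reserved to mark
    /// exclusive ownership, hence the `- 1`.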
pub const MAX_SHARED_OWNERS: usize = WaitQueue::DATA_MASK - 1;
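    /// Returns `true` if the [`Lock`] has no shared or exclusive owner.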
#[inline]
pub fn is_free(&self, mo: Ordering) -> bool {
let state = self.state.load(mo);
(state & WaitQueue::DATA_MASK) == 0
}
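    /// Returns `true` if the [`Lock`] is exclusively owned.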
#[inline]
pub fn is_locked(&self, mo: Ordering) -> bool {
(self.state.load(mo) & WaitQueue::DATA_MASK) == WaitQueue::DATA_MASK
}
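    /// Returns `true` if the [`Lock`] is owned by one or more shared owners.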
#[inline]
pub fn is_shared(&self, mo: Ordering) -> bool {
let share_state = self.state.load(mo) & WaitQueue::DATA_MASK;
share_state != 0 && share_state != WaitQueue::DATA_MASK
}
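    /// Acquires the [`Lock`] exclusively, asynchronously waiting in the
    /// [`WaitQueue`] whenever an attempt fails.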
#[inline]
pub async fn lock_exclusive_async(&self) {
loop {
let (result, state) = self.try_lock_exclusive_internal();
if result {
return;
}
if self.wait_resources_async(state, Opcode::Exclusive).await {
return;
}
}
}
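    /// Acquires the [`Lock`] exclusively, blocking the current thread in the
    /// [`WaitQueue`] whenever an attempt fails.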
#[inline]
pub fn lock_exclusive_sync(&self) {
loop {
let (result, state) = self.try_lock_exclusive_internal();
if result {
return;
}
if self.wait_resources_sync(state, Opcode::Exclusive) {
return;
}
}
}
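    /// Tries to acquire the [`Lock`] exclusively without waiting, returning
    /// `true` on success.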
#[inline]
pub fn try_lock_exclusive(&self) -> bool {
self.try_lock_exclusive_internal().0
}
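    /// Acquires the [`Lock`] in shared mode, asynchronously waiting in the
    /// [`WaitQueue`] whenever an attempt fails.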
#[inline]
pub async fn lock_shared_async(&self) {
loop {
let (result, state) = self.try_lock_shared_internal();
if result {
return;
}
if self.wait_resources_async(state, Opcode::Shared).await {
return;
}
}
}
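    /// Acquires the [`Lock`] in shared mode, blocking the current thread in
    /// the [`WaitQueue`] whenever an attempt fails.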
#[inline]
pub fn lock_shared_sync(&self) {
loop {
let (result, state) = self.try_lock_shared_internal();
if result {
return;
}
if self.wait_resources_sync(state, Opcode::Shared) {
return;
}
}
}
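    /// Tries to acquire the [`Lock`] in shared mode without waiting, returning
    /// `true` on success.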
#[inline]
pub fn try_lock_shared(&self) -> bool {
self.try_lock_shared_internal().0
}
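    /// Releases exclusive ownership of the [`Lock`].
    ///
    /// The `compare_exchange` covers the uncontended fast path; any other
    /// state is handed to `release_loop`, whose result is returned.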
#[inline]
pub fn unlock_exclusive(&self) -> bool {
match self
.state
            .compare_exchange(WaitQueue::DATA_MASK, 0, Release, Relaxed)
{
Ok(_) => true,
Err(state) => self.release_loop(state, Opcode::Exclusive),
}
}
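    /// Releases shared ownership of the [`Lock`].
    ///
    /// The `compare_exchange` covers the single-shared-owner fast path; any
    /// other state is handed to `release_loop`, whose result is returned.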
#[inline]
pub fn unlock_shared(&self) -> bool {
        match self.state.compare_exchange(1, 0, Release, Relaxed) {
Ok(_) => true,
Err(state) => self.release_loop(state, Opcode::Shared),
}
}
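    /// Tries to acquire the [`Lock`] exclusively, returning whether it
    /// succeeded and, on failure, the last observed state.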
    fn try_lock_exclusive_internal(&self) -> (bool, usize) {
        let Err(mut state) = self
            .state
            .compare_exchange(0, WaitQueue::DATA_MASK, Acquire, Relaxed)
        else {
            return (true, 0);
        };
        loop {
            // Back off if there are queued waiters or any current owner.
            if state & (WaitQueue::ADDR_MASK | WaitQueue::DATA_MASK) != 0 {
                return (false, state);
            }
            // No waiters and no owners: saturate the data bits to mark the
            // lock as exclusively owned, preserving the remaining flags.
            match self.state.compare_exchange(
                state,
                state | WaitQueue::DATA_MASK,
                Acquire,
                Relaxed,
            ) {
                Ok(_) => return (true, 0),
                Err(new_state) => state = new_state,
            }
        }
    }
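    /// Tries to acquire the [`Lock`] in shared mode, returning whether it
    /// succeeded and, on failure, the last observed state.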
fn try_lock_shared_internal(&self) -> (bool, usize) {
let Err(mut state) = self.state.compare_exchange(0, 1, Acquire, Relaxed) else {
return (true, 0);
};
loop {
if state & WaitQueue::ADDR_MASK != 0
|| state & WaitQueue::DATA_MASK >= Self::MAX_SHARED_OWNERS
{
return (false, state);
}
match self
.state
.compare_exchange(state, state + 1, Acquire, Relaxed)
{
Ok(_) => return (true, 0),
Err(new_state) => state = new_state,
}
}
}
}
impl fmt::Debug for Lock {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let state = self.state.load(Relaxed);
let lock_share_state = state & WaitQueue::DATA_MASK;
let locked = lock_share_state == WaitQueue::DATA_MASK;
let share_count = if locked { 0 } else { lock_share_state };
let wait_queue_being_processed = state & WaitQueue::LOCKED_FLAG == WaitQueue::LOCKED_FLAG;
let wait_queue_tail_addr = state & WaitQueue::ADDR_MASK;
f.debug_struct("WaitQueue")
.field("state", &state)
.field("locked", &locked)
.field("share_count", &share_count)
.field("wait_queue_being_processed", &wait_queue_being_processed)
.field("wait_queue_tail_addr", &wait_queue_tail_addr)
.finish()
}
}
impl SyncPrimitive for Lock {
#[inline]
fn state(&self) -> &AtomicUsize {
&self.state
}
#[inline]
fn max_shared_owners() -> usize {
Self::MAX_SHARED_OWNERS
}
}
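// A minimal sanity check of the fast paths above; a sketch, not an exhaustive
// test. It only exercises the uncontended `try_*` and single-owner `unlock_*`
// branches, so neither `release_loop` nor the `WaitQueue` is involved, and it
// is gated off when the `loom` feature is enabled because `loom` atomics only
// work inside a `loom::model` run.
#[cfg(all(test, not(feature = "loom")))]
mod tests {
    use super::Lock;
    use std::sync::atomic::Ordering::Relaxed;
    #[test]
    fn exclusive_and_shared_fast_paths() {
        let lock = Lock::default();
        assert!(lock.is_free(Relaxed));
        // Exclusive ownership blocks shared acquisition.
        assert!(lock.try_lock_exclusive());
        assert!(lock.is_locked(Relaxed));
        assert!(!lock.try_lock_shared());
        assert!(lock.unlock_exclusive());
        // A single shared owner blocks exclusive acquisition.
        assert!(lock.try_lock_shared());
        assert!(lock.is_shared(Relaxed));
        assert!(!lock.try_lock_exclusive());
        assert!(lock.unlock_shared());
        assert!(lock.is_free(Relaxed));
    }
}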