diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index d44ffe2..13af139 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -2,9 +2,9 @@ name: Lint

 on:
   push:
-    branches: [ "main" ]
+    branches: ["main"]
   pull_request:
-    branches: [ "main" ]
+    branches: ["main"]

 jobs:
   build:
@@ -12,8 +12,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
-    - name: Run clippy
-      run: cargo clippy --all-targets --all-features -- -D warnings
-    - name: Run rustfmt
-      run: cargo fmt --check
+      - uses: actions/checkout@v3
+      - name: Run clippy
+        run: cargo clippy --all-targets --all-features -- -D warnings
+      - name: Run clippy (release)
+        run: cargo clippy --release --all-targets --all-features -- -D warnings
+      - name: Run rustfmt
+        run: cargo fmt --check
diff --git a/.gitignore b/.gitignore
index 4fffb2f..2963bc4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
 /target
 /Cargo.lock
+
+.idea
diff --git a/src/level.rs b/src/level.rs
new file mode 100644
index 0000000..94fb667
--- /dev/null
+++ b/src/level.rs
@@ -0,0 +1,191 @@
+#[cfg(debug_assertions)]
+use std::{cell::RefCell, thread_local};
+
+#[cfg(debug_assertions)]
+thread_local! {
+    /// We hold a stack of thread local lock levels.
+    ///
+    /// * Thread local: We want to trace the lock level for each native system thread. Also making it
+    ///   thread local implies that this needs no synchronization.
+    /// * Stack: Just holding the current lock level would be insufficient in situations where locks
+    ///   are released in a different order, from what they were acquired in. This way we can
+    ///   support scenarios like e.g.: Acquire A, Acquire B, Release A, Acquire C, ...
+    /// * RefCell: Static implies immutability in safe code, yet we want to mutate it. So we use a
+    ///   `RefCell` to acquire interior mutability.
+    static LOCK_LEVELS: RefCell<Vec<u32>> = const { RefCell::new(Vec::new()) };
+}
+
+#[derive(Debug)]
+pub(crate) struct Level {
+    /// Level of this mutex in the hierarchy. 
Higher levels must be acquired first if locks are to + /// be held simultaneously. + #[cfg(debug_assertions)] + pub(crate) level: u32, +} + +impl Default for Level { + #[inline] + fn default() -> Self { + Self::new(0) + } +} + +impl Level { + #[inline] + pub fn new(level: u32) -> Self { + #[cfg(not(debug_assertions))] + let _ = level; + Self { + #[cfg(debug_assertions)] + level, + } + } + + #[inline] + pub fn lock(&self) -> LevelGuard { + #[cfg(debug_assertions)] + LOCK_LEVELS.with(|levels| { + let mut levels = levels.borrow_mut(); + if let Some(&lowest) = levels.last() { + if lowest <= self.level { + panic!( + "Tried to acquire lock with level {} while a lock with level {} \ + is acquired. This is a violation of lock hierarchies which \ + could lead to deadlocks.", + self.level, lowest + ) + } + } + levels.push(self.level); + }); + LevelGuard { + #[cfg(debug_assertions)] + level: self.level, + } + } +} + +pub struct LevelGuard { + #[cfg(debug_assertions)] + pub(crate) level: u32, +} + +#[cfg(debug_assertions)] +impl Drop for LevelGuard { + #[inline] + fn drop(&mut self) { + LOCK_LEVELS.with(|levels| { + let mut levels = levels.borrow_mut(); + let index = levels + .iter() + .rposition(|&level| level == self.level) + .expect("Position must exist, because we inserted it during lock!"); + levels.remove(index); + }); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[cfg(debug_assertions)] + #[should_panic( + expected = "Tried to acquire lock with level 0 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks." + )] + fn self_deadlock_detected() { + let mutex = Level::new(0); + let _guard_a = mutex.lock(); + // This must panic + let _guard_b = mutex.lock(); + } + + #[test] + #[cfg(debug_assertions)] + #[should_panic( + expected = "Tried to acquire lock with level 0 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks." 
+    )]
+    fn panic_if_two_mutexes_with_level_0_are_acquired() {
+        let mutex_a = Level::new(0);
+        let mutex_b = Level::new(0);
+
+        // Fine, first mutex in thread
+        let _guard_a = mutex_a.lock();
+        // Must panic, lock hierarchy violation
+        let _guard_b = mutex_b.lock();
+    }
+
+    #[test]
+    #[cfg(debug_assertions)]
+    fn created_by_default_impl_should_be_level_0() {
+        // This test would fail if the default-constructed level were anything greater than 0.
+        let mutex = Level::default();
+        assert_eq!(mutex.level, 0);
+    }
+
+    #[test]
+    #[cfg(debug_assertions)]
+    #[should_panic(
+        expected = "Tried to acquire lock with level 1 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks."
+    )]
+    fn panic_if_0_is_acquired_before_1() {
+        let mutex_a = Level::new(0); // Level 0
+        let mutex_b = Level::new(1); // Level 1
+
+        // Fine, first mutex in thread
+        let _guard_a = mutex_a.lock();
+        // Must panic, lock hierarchy violation
+        let _guard_b = mutex_b.lock();
+    }
+
+    #[test]
+    #[cfg(not(debug_assertions))]
+    fn should_not_check_in_release_build() {
+        let mutex_a = Level::new(0);
+        let mutex_b = Level::new(0);
+
+        // Fine, first mutex in thread
+        let _guard_a = mutex_a.lock();
+        // Lock hierarchy violation, but we do not panic, since debug assertions are not active
+        let _guard_b = mutex_b.lock();
+    }
+
+    #[test]
+    fn two_locks_in_succession() {
+        let mutex_a = Level::new(5); // Level 5
+        let mutex_b = Level::new(42); // Level 42
+        {
+            // Fine, first mutex in thread
+            let _guard_a = mutex_a.lock();
+        }
+        // Fine, first mutex has already been dropped
+        let _guard_b = mutex_b.lock();
+    }
+
+    #[test]
+    fn simultaneous_lock_if_higher_is_acquired_first() {
+        let mutex_a = Level::new(1);
+        let mutex_b = Level::new(0);
+
+        // Fine, first mutex in thread
+        let _guard_a = mutex_a.lock();
+        // Fine: 0 is lower level than 1
+        let _guard_b = mutex_b.lock();
+    }
+
+    #[test]
+    fn any_order_of_release() {
+        let mutex_a = Level::new(2);
+        let mutex_b = Level::new(1); 
+ let mutex_c = Level::new(0); + + // Fine, first mutex in thread + let _guard_a = mutex_a.lock(); + // Fine: 0 is lower level than 1 + let guard_b = mutex_b.lock(); + let _guard_c = mutex_c.lock(); + #[allow(clippy::drop_non_drop)] + drop(guard_b) + } +} diff --git a/src/lib.rs b/src/lib.rs index ad1d467..494cd2b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,136 +1,22 @@ //! This crate offers debug assertions for violations of lock hierarchies. No runtime overhead or //! protection occurs for release builds. +//! +//! Each lock is assigned a level. Locks with higher levels must be acquired before locks with +//! lower levels. +//! Both [RwLock] and [Mutex] use the same hierarchy. -#[cfg(debug_assertions)] -use std::{cell::RefCell, thread_local}; -use std::{ - ops::{Deref, DerefMut}, - sync::PoisonError, -}; +mod level; +mod mutex; +mod rwlock; -#[cfg(debug_assertions)] -thread_local! { - /// We hold a stack of thread local lock levels. - /// - /// * Thread local: We want to trace the lock level for each native system thread. Also making it - /// thread local implies that this needs no synchronization. - /// * Stack: Just holding the current lock level would be insufficient in situations there locks - /// are released in a different order, from what they were acquired in. This way we can - /// support scenarios like e.g.: Acquire A, Acquire B, Release A, Acquire C, ... - /// * RefCell: Static implies immutability in safe code, yet we want to mutate it. So we use a - /// `RefCell` to acquire interiour mutability. - static LOCK_LEVELS: RefCell> = const { RefCell::new(Vec::new()) }; -} - -/// Wrapper around a [`std::sync::Mutex`] which uses a thread local variable in order to check for -/// lock hierachy violations in debug builds. -/// -/// Each Mutex is assigned a level. Mutexes with higher levels must be acquired before mutexes with -/// lower levels. 
-/// -/// ``` -/// use lock_hierarchy::Mutex; -/// -/// let mutex_a = Mutex::new(()); // Level 0 -/// let mutex_b = Mutex::with_level((), 0); // also level 0 -/// // Fine, first mutex in thread -/// let _guard_a = mutex_a.lock().unwrap(); -/// // Would panic, lock hierarchy violation -/// // let _guard_b = mutex_b.lock().unwrap(); -/// ``` -#[derive(Debug, Default)] -pub struct Mutex { - /// Level of this mutex in the hierarchy. Higher levels must be acquired first if locks are to - /// be held simultaniously. - #[cfg(debug_assertions)] - level: u32, - inner: std::sync::Mutex, -} +use std::sync::{LockResult, PoisonError}; -impl Mutex { - /// Creates Mutex with level 0. Use this constructor if you want to get an error in debug builds - /// every time you acquire another mutex while holding this one. - pub fn new(t: T) -> Self { - Self::with_level(t, 0) - } - - /// Creates a mutex and assigns it a level in the lock hierarchy. Higher levels must be acquired - /// first if locks are to be held simultaniously. This way we can ensure locks are always - /// acquired in the same order. This prevents deadlocks. - pub fn with_level(t: T, level: u32) -> Self { - // Explicitly ignore level in release builds - #[cfg(not(debug_assertions))] - let _ = level; - Mutex { - #[cfg(debug_assertions)] - level, - inner: std::sync::Mutex::new(t), - } - } - - pub fn lock(&self) -> Result, PoisonError>> { - #[cfg(debug_assertions)] - LOCK_LEVELS.with(|levels| { - let mut levels = levels.borrow_mut(); - if let Some(&lowest) = levels.last() { - if lowest <= self.level { - panic!( - "Tried to acquire lock to a mutex with level {}. Yet lock with level {} \ - had been acquired first. 
This is a violation of lock hierarchies which \ - could lead to deadlocks.", - self.level, lowest - ) - } - assert!(lowest > self.level) - } - levels.push(self.level); - }); - self.inner.lock().map(|guard| MutexGuard { - #[cfg(debug_assertions)] - level: self.level, - inner: guard, - }) - } -} - -impl From for Mutex { - /// Creates a new mutex in an unlocked state ready for use. - /// This is equivalent to [`Mutex::new`]. - fn from(value: T) -> Self { - Mutex::new(value) - } -} - -pub struct MutexGuard<'a, T> { - #[cfg(debug_assertions)] - level: u32, - inner: std::sync::MutexGuard<'a, T>, -} - -impl Drop for MutexGuard<'_, T> { - fn drop(&mut self) { - #[cfg(debug_assertions)] - LOCK_LEVELS.with(|levels| { - let mut levels = levels.borrow_mut(); - let index = levels - .iter() - .rposition(|&level| level == self.level) - .expect("Position must exist, because we inserted it during lock!"); - levels.remove(index); - }); - } -} - -impl Deref for MutexGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - self.inner.deref() - } -} +pub use mutex::{Mutex, MutexGuard}; +pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -impl DerefMut for MutexGuard<'_, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - self.inner.deref_mut() +pub(crate) fn map_guard(result: LockResult, f: impl FnOnce(G) -> F) -> LockResult { + match result { + Ok(guard) => Ok(f(guard)), + Err(err) => Err(PoisonError::new(f(err.into_inner()))), } } diff --git a/src/mutex.rs b/src/mutex.rs new file mode 100644 index 0000000..ffc0b65 --- /dev/null +++ b/src/mutex.rs @@ -0,0 +1,202 @@ +use std::{ + fmt::{Debug, Display, Formatter}, + ops::{Deref, DerefMut}, + sync::LockResult, +}; + +use crate::{ + level::{Level, LevelGuard}, + map_guard, +}; + +/// Wrapper around a [`std::sync::Mutex`] which uses a thread local variable in order to check for +/// lock hierarchy violations in debug builds. +/// +/// See the [crate level documentation](crate) for more general information. 
+/// +/// ``` +/// use lock_hierarchy::Mutex; +/// +/// let mutex_a = Mutex::new(()); // Level 0 +/// let mutex_b = Mutex::with_level((), 0); // also level 0 +/// // Fine, first mutex in thread +/// let _guard_a = mutex_a.lock().unwrap(); +/// // Would panic, lock hierarchy violation +/// // let _guard_b = mutex_b.lock().unwrap(); +/// ``` +#[derive(Debug, Default)] +pub struct Mutex { + inner: std::sync::Mutex, + level: Level, +} + +impl Mutex { + /// Creates lock with level 0. Use this constructor if you want to get an error in debug builds + /// every time you acquire another lock while holding this one. + pub fn new(t: T) -> Self { + Self::with_level(t, 0) + } + + /// Creates a lock and assigns it a level in the lock hierarchy. Higher levels must be acquired + /// first if locks are to be held simultaneously. This way we can ensure locks are always + /// acquired in the same order. This prevents deadlocks. + pub fn with_level(t: T, level: u32) -> Self { + Mutex { + inner: std::sync::Mutex::new(t), + level: Level::new(level), + } + } + + /// See [std::sync::Mutex::lock] + pub fn lock(&self) -> LockResult> { + let level = self.level.lock(); + map_guard(self.inner.lock(), |guard| MutexGuard { + inner: guard, + _level: level, + }) + } + + /// See [std::sync::Mutex::get_mut] + pub fn get_mut(&mut self) -> LockResult<&mut T> { + // No need to check hierarchy, this does not lock + self.inner.get_mut() + } + + /// See [std::sync::Mutex::into_inner] + pub fn into_inner(self) -> LockResult { + // No need to check hierarchy, this does not lock + self.inner.into_inner() + } +} + +impl From for Mutex { + /// Creates a new mutex in an unlocked state ready for use. + /// This is equivalent to [`Mutex::new`]. 
+ fn from(value: T) -> Self { + Mutex::new(value) + } +} + +pub struct MutexGuard<'a, T> { + inner: std::sync::MutexGuard<'a, T>, + _level: LevelGuard, +} + +impl Debug for MutexGuard<'_, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +impl Display for MutexGuard<'_, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Display::fmt(&self.inner, f) + } +} + +impl Deref for MutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + self.inner.deref() + } +} + +impl DerefMut for MutexGuard<'_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.inner.deref_mut() + } +} + +#[cfg(test)] +mod tests { + use std::{hint::black_box, sync::Arc, thread}; + + use super::*; + + #[test] + fn acquire_resource() { + let mutex = Mutex::new(42); + let guard = mutex.lock().unwrap(); + + assert_eq!(42, *guard) + } + + #[test] + fn allow_mutation() { + let mutex = Mutex::new(42); + let mut guard = mutex.lock().unwrap(); + + *guard = 43; + + assert_eq!(43, *guard) + } + + #[test] + fn multithreaded() { + let mutex = Arc::new(Mutex::new(())); + let thread = thread::spawn({ + let mutex = mutex.clone(); + move || { + black_box(mutex.lock().unwrap()); + } + }); + black_box(mutex.lock().unwrap()); + thread.join().unwrap(); + } + + #[test] + #[should_panic( + expected = "Tried to acquire lock with level 0 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks." + )] + #[cfg(debug_assertions)] + fn self_deadlock() { + // This ensures that the level is locked in Mutex::lock before locking the std lock which might otherwise cause an unchecked deadlock + let mutex = Mutex::new(()); + let _guard = mutex.lock().unwrap(); + let _guard = mutex.lock().unwrap(); + } + + #[test] + #[should_panic( + expected = "Tried to acquire lock with level 0 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks." 
+ )] + #[cfg(debug_assertions)] + fn poisoned_lock() { + let mutex = Mutex::new(()); + std::panic::catch_unwind(|| { + let _guard = mutex.lock(); + panic!("lock is poisoned now"); + }) + .unwrap_err(); + + let _guard_a = mutex.lock().unwrap_err().into_inner(); + let _guard_b = mutex.lock(); + } + + #[test] + #[cfg(debug_assertions)] + fn correct_level_locked() { + let mutex = Mutex::with_level((), 1); + let _guard_a = mutex.lock().unwrap(); + assert_eq!(_guard_a._level.level, 1); + + let mutex = Mutex::new(()); + let _guard_a = mutex.lock().unwrap(); + assert_eq!(_guard_a._level.level, 0); + } + + #[test] + #[cfg(debug_assertions)] + fn created_by_default_impl_should_be_level_0() { + let mutex = Mutex::<()>::default(); + assert_eq!(mutex.level.level, 0); + } + + #[test] + #[cfg(debug_assertions)] + fn mutex_created_by_from_impl_should_be_level_0() { + let mutex: Mutex = 42.into(); + assert_eq!(mutex.level.level, 0); + } +} diff --git a/src/rwlock.rs b/src/rwlock.rs new file mode 100644 index 0000000..2f3de11 --- /dev/null +++ b/src/rwlock.rs @@ -0,0 +1,280 @@ +use std::{ + fmt::{Debug, Display, Formatter}, + ops::{Deref, DerefMut}, + sync::LockResult, +}; + +use crate::{ + level::{Level, LevelGuard}, + map_guard, +}; + +/// Wrapper around a [`std::sync::RwLock`] which uses a thread local variable in order to check for +/// lock hierarchy violations in debug builds. +/// +/// See the [crate level documentation](crate) for more general information. +/// +/// ``` +/// use lock_hierarchy::RwLock; +/// +/// let mutex_a = RwLock::new(()); // Level 0 +/// let mutex_b = RwLock::with_level((), 0); // also level 0 +/// // Fine, first mutex in thread +/// let _guard_a = mutex_a.read().unwrap(); +/// // Would panic, lock hierarchy violation +/// // let _guard_b = mutex_b.read().unwrap(); +/// ``` +#[derive(Debug, Default)] +pub struct RwLock { + inner: std::sync::RwLock, + level: Level, +} + +impl RwLock { + /// Creates a lock with level 0. 
Use this constructor if you want to get an error in debug builds
+    /// every time you acquire another lock while holding this one.
+    pub fn new(t: T) -> Self {
+        Self::with_level(t, 0)
+    }
+
+    /// Creates a lock and assigns it a level in the lock hierarchy. Higher levels must be acquired
+    /// first if locks are to be held simultaneously. This way we can ensure locks are always
+    /// acquired in the same order. This prevents deadlocks.
+    pub fn with_level(t: T, level: u32) -> Self {
+        RwLock {
+            inner: std::sync::RwLock::new(t),
+            level: Level::new(level),
+        }
+    }
+
+    /// See [std::sync::RwLock::read]
+    pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
+        let level = self.level.lock();
+        map_guard(self.inner.read(), |guard| RwLockReadGuard {
+            inner: guard,
+            _level: level,
+        })
+    }
+
+    /// See [std::sync::RwLock::write]
+    pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
+        let level = self.level.lock();
+        map_guard(self.inner.write(), |guard| RwLockWriteGuard {
+            inner: guard,
+            _level: level,
+        })
+    }
+
+    /// See [std::sync::RwLock::get_mut]
+    pub fn get_mut(&mut self) -> LockResult<&mut T> {
+        // No need to check hierarchy, this does not lock
+        self.inner.get_mut()
+    }
+
+    /// See [std::sync::RwLock::into_inner]
+    pub fn into_inner(self) -> LockResult<T> {
+        // No need to check hierarchy, this does not lock
+        self.inner.into_inner()
+    }
+}
+
+impl<T> From<T> for RwLock<T> {
+    /// Creates a new `RwLock` in an unlocked state ready for use.
+    /// This is equivalent to [`RwLock::new`]. 
+ fn from(value: T) -> Self { + RwLock::new(value) + } +} + +pub struct RwLockReadGuard<'a, T> { + inner: std::sync::RwLockReadGuard<'a, T>, + _level: LevelGuard, +} + +impl Debug for RwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +impl Display for RwLockReadGuard<'_, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Display::fmt(&self.inner, f) + } +} + +impl Deref for RwLockReadGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + self.inner.deref() + } +} + +pub struct RwLockWriteGuard<'a, T> { + inner: std::sync::RwLockWriteGuard<'a, T>, + _level: LevelGuard, +} + +impl Debug for RwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Debug::fmt(&self.inner, f) + } +} + +impl Display for RwLockWriteGuard<'_, T> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Display::fmt(&self.inner, f) + } +} + +impl Deref for RwLockWriteGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + self.inner.deref() + } +} + +impl DerefMut for RwLockWriteGuard<'_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.inner.deref_mut() + } +} + +#[cfg(test)] +mod tests { + use std::{hint::black_box, sync::Arc, thread}; + + use super::*; + + #[test] + fn acquire_resource() { + let mutex = RwLock::new(42); + let guard = mutex.read().unwrap(); + assert_eq!(42, *guard); + drop(guard); + + let guard = mutex.write().unwrap(); + assert_eq!(42, *guard); + drop(guard); + } + + #[test] + fn allow_mutation() { + let mutex = RwLock::new(42); + let mut guard = mutex.write().unwrap(); + + *guard = 43; + + assert_eq!(43, *guard) + } + + #[test] + fn multithreaded() { + let mutex = Arc::new(RwLock::new(())); + let thread = thread::spawn({ + let mutex = mutex.clone(); + move || { + black_box(mutex.read().unwrap()); + black_box(mutex.write().unwrap()); + } + }); + black_box(mutex.read().unwrap()); + black_box(mutex.write().unwrap()); 
+ thread.join().unwrap(); + } + + #[cfg(debug_assertions)] + fn poisoned_lock() -> RwLock<()> { + let mutex = RwLock::new(()); + std::panic::catch_unwind(|| { + let _guard = mutex.write(); + panic!("lock is poisoned now"); + }) + .unwrap_err(); + mutex + } + + #[test] + #[should_panic( + expected = "Tried to acquire lock with level 0 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks." + )] + #[cfg(debug_assertions)] + fn poisoned_read_lock() { + let mutex = poisoned_lock(); + + let _guard_a = mutex.read().unwrap_err().into_inner(); + let _guard_b = mutex.read(); + } + + #[test] + #[should_panic( + expected = "Tried to acquire lock with level 0 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks." + )] + #[cfg(debug_assertions)] + fn poisoned_write_lock() { + let mutex = poisoned_lock(); + + let _guard_a = mutex.write().unwrap_err().into_inner(); + let _guard_b = mutex.write(); + } + + #[test] + #[should_panic( + expected = "Tried to acquire lock with level 0 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks." + )] + #[cfg(debug_assertions)] + fn self_deadlock_write() { + // This ensures that the level is locked in RwLock::write before locking the std lock which might otherwise cause a deadlock + let mutex = RwLock::new(()); + let _guard = mutex.read().unwrap(); + let _guard = mutex.write().unwrap(); + } + + #[test] + #[should_panic( + expected = "Tried to acquire lock with level 0 while a lock with level 0 is acquired. This is a violation of lock hierarchies which could lead to deadlocks." 
+ )] + #[cfg(debug_assertions)] + fn self_deadlock_read() { + // This ensures that the level is locked in RwLock::read before locking the std lock which might otherwise cause an unchecked deadlock + let mutex = RwLock::new(()); + let _guard = mutex.read().unwrap(); + let _guard = mutex.read().unwrap(); + } + + #[test] + #[cfg(debug_assertions)] + fn correct_level_locked() { + let mutex = RwLock::with_level((), 1); + let guard = mutex.read().unwrap(); + assert_eq!(guard._level.level, 1); + drop(guard); + let guard = mutex.write().unwrap(); + assert_eq!(guard._level.level, 1); + drop(guard); + + let mutex = RwLock::new(()); + let guard = mutex.read().unwrap(); + assert_eq!(guard._level.level, 0); + drop(guard); + let guard = mutex.write().unwrap(); + assert_eq!(guard._level.level, 0); + drop(guard); + } + + #[test] + #[cfg(debug_assertions)] + fn created_by_default_impl_should_be_level_0() { + let mutex = RwLock::<()>::default(); + assert_eq!(mutex.level.level, 0); + } + + #[test] + #[cfg(debug_assertions)] + fn mutex_created_by_from_impl_should_be_level_0() { + let mutex: RwLock = 42.into(); + assert_eq!(mutex.level.level, 0); + } +} diff --git a/tests/integration.rs b/tests/integration.rs deleted file mode 100644 index 1a5dfe4..0000000 --- a/tests/integration.rs +++ /dev/null @@ -1,122 +0,0 @@ -use lock_hierarchy::Mutex; - -#[test] -fn acquire_resource() { - let mutex = Mutex::new(42); - let guard = mutex.lock().unwrap(); - - assert_eq!(42, *guard) -} - -#[test] -fn should_allow_mutation() { - let mutex = Mutex::new(42); - let mut guard = mutex.lock().unwrap(); - - *guard = 43; - - assert_eq!(43, *guard) -} - -#[test] -#[cfg(debug_assertions)] -#[should_panic] -fn should_panic_if_two_mutices_with_level_0_are_acquired() { - let mutex_a = Mutex::new(()); // Level 0 - let mutex_b = Mutex::new(()); // also level 0 - - // Fine, first mutex in thread - let _guard_a = mutex_a.lock().unwrap(); - // Must panic, lock hierarchy violation - let _guard_b = 
mutex_b.lock().unwrap(); -} - -#[test] -#[cfg(debug_assertions)] -#[should_panic] -fn mutex_created_by_default_impl_should_be_level_0() { - // This test would fail if mutex_a had any level greater than 0. - let mutex_a: Mutex<()> = Mutex::default(); // Level 0 - let mutex_b = Mutex::with_level((), 0); // also level 0 - - // Fine, first mutex in thread - let _guard_a = mutex_a.lock().unwrap(); - // Must panic, lock hierarchy violation - let _guard_b = mutex_b.lock().unwrap(); -} - -#[test] -#[cfg(debug_assertions)] -#[should_panic] -fn mutex_created_by_from_impl_should_be_level_0() { - // This test would fail if mutex_a had any level greater than 0. - let mutex_a: Mutex = 42.into(); // Level 0 - let mutex_b = Mutex::with_level(5, 0); // also level 0 - - // Fine, first mutex in thread - let _guard_a = mutex_a.lock().unwrap(); - // Must panic, lock hierarchy violation - let _guard_b = mutex_b.lock().unwrap(); -} - -#[test] -#[cfg(debug_assertions)] -#[should_panic] -fn should_panic_if_0_is_acquired_before_1() { - let mutex_a = Mutex::new(()); // Level 0 - let mutex_b = Mutex::with_level((), 1); // Level 1 - - // Fine, first mutex in thread - let _guard_a = mutex_a.lock().unwrap(); - // Must panic, lock hierarchy violation - let _guard_b = mutex_b.lock().unwrap(); -} - -#[test] -#[cfg(not(debug_assertions))] -fn should_not_check_in_release_build() { - let mutex_a = Mutex::new(5); // Level 0 - let mutex_b = Mutex::new(42); // also level 0 - - // Fine, first mutex in thread - let _guard_a = mutex_a.lock().unwrap(); - // Lock hierarchy violation, but we do not panic, since debug assertions are not active - let _guard_b = mutex_b.lock().unwrap(); -} - -#[test] -fn should_allow_for_two_level_0_in_succession() { - let mutex_a = Mutex::new(5); // Level 0 - let mutex_b = Mutex::new(42); // also level 0 - - // Fine, first mutex in thread - let guard_a = mutex_a.lock().unwrap(); - drop(guard_a); - // Fine, first mutext has already been dropped - let _guard_b = 
mutex_b.lock().unwrap(); -} - -#[test] -fn should_allow_for_simultanous_lock_if_higher_is_acquired_first() { - let mutex_a = Mutex::with_level(5, 1); // Level 1 - let mutex_b = Mutex::new(42); // also level 0 - - // Fine, first mutex in thread - let _guard_a = mutex_a.lock().unwrap(); - // Fine: 0 is lower level than 1 - let _guard_b = mutex_b.lock().unwrap(); -} - -#[test] -fn should_allow_for_any_order_of_release() { - let mutex_a = Mutex::with_level((), 2); - let mutex_b = Mutex::with_level((), 1); - let mutex_c = Mutex::new(()); - - // Fine, first mutex in thread - let _guard_a = mutex_a.lock().unwrap(); - // Fine: 0 is lower level than 1 - let guard_b = mutex_b.lock().unwrap(); - let _guard_c = mutex_c.lock().unwrap(); - drop(guard_b) -}