| author | Mica White <botahamec@gmail.com> | 2024-12-23 12:54:01 -0500 |
|---|---|---|
| committer | Mica White <botahamec@gmail.com> | 2024-12-23 12:54:01 -0500 |
| commit | cbdce939291800f297acc700e02db8b6798239ce | |
| tree | de3b407c3a93707937797194106419ba6b0eb23b /src | |
| parent | b2281e6aec631dc7c6d69edef9268ce7e00ed1dc | |
Utilize mutex death
Diffstat (limited to 'src')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | src/collection/boxed.rs | 55 |
| -rw-r--r-- | src/collection/owned.rs | 57 |
| -rw-r--r-- | src/collection/ref.rs | 50 |
| -rw-r--r-- | src/collection/retry.rs | 42 |
| -rw-r--r-- | src/collection/utils.rs | 42 |
| -rw-r--r-- | src/mutex/mutex.rs | 48 |
| -rw-r--r-- | src/rwlock/rwlock.rs | 72 |
7 files changed, 287 insertions, 79 deletions
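The five collection files all gain the same recovery pattern: each successfully acquired lock is recorded in a `RefCell<Vec<&dyn RawLock>>`, and a `scopeguard::defer_on_unwind!` guard releases the recorded locks if a panic unwinds out of the acquisition loop (the real helpers additionally kill the locks should that release itself panic). Below is a minimal standalone sketch of the idea, assuming the `scopeguard` crate as used in the diff; the `RawLock` trait, `SpinLock` type, and `lock_all` function here are illustrative stand-ins, not this crate's real definitions.

```rust
use std::cell::RefCell;
use std::sync::atomic::{AtomicBool, Ordering};

// Illustrative stand-in for the crate's `RawLock` trait.
trait RawLock {
    fn raw_lock(&self);
    fn raw_unlock(&self);
}

// A toy spin lock standing in for a real mutex backend.
struct SpinLock(AtomicBool);

impl RawLock for SpinLock {
    fn raw_lock(&self) {
        while self.0.swap(true, Ordering::Acquire) {
            std::hint::spin_loop();
        }
    }

    fn raw_unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

fn lock_all(locks: &[&dyn RawLock]) {
    // Record each lock as it is acquired, so a panic that unwinds out of
    // `raw_lock` leaves behind a precise list of what must be released.
    let locked = RefCell::new(Vec::with_capacity(locks.len()));
    scopeguard::defer_on_unwind! {
        // Runs only when unwinding: release everything acquired so far,
        // newest first, rather than leaving other threads deadlocked.
        let mut locked = locked.borrow_mut();
        while let Some(lock) = locked.pop() {
            lock.raw_unlock();
        }
    };

    for lock in locks {
        lock.raw_lock();
        locked.borrow_mut().push(*lock);
    }
}

fn main() {
    let a = SpinLock(AtomicBool::new(false));
    let b = SpinLock(AtomicBool::new(false));
    lock_all(&[&a, &b]);
    // A real collection would hand back a guard that unlocks on drop; here
    // we unlock by hand.
    a.raw_unlock();
    b.raw_unlock();
}
```

Note that a lock is pushed onto `locked` only after its `raw_lock` call returns, so the unwind guard never unlocks something the thread failed to acquire.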
```diff
diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs
index bef3df2..3cfc336 100644
--- a/src/collection/boxed.rs
+++ b/src/collection/boxed.rs
@@ -1,5 +1,5 @@
 use std::alloc::Layout;
-use std::cell::UnsafeCell;
+use std::cell::{RefCell, UnsafeCell};
 use std::fmt::Debug;
 use std::marker::PhantomData;
 
@@ -20,7 +20,7 @@ fn contains_duplicates(l: &[&dyn RawLock]) -> bool {
         .any(|window| std::ptr::eq(window[0], window[1]))
 }
 
-unsafe impl<L: Lockable + Send + Sync> RawLock for BoxedLockCollection<L> {
+unsafe impl<L: Lockable> RawLock for BoxedLockCollection<L> {
     fn kill(&self) {
         for lock in &self.locks {
             lock.kill();
@@ -28,8 +28,15 @@ unsafe impl<L: Lockable + Send + Sync> RawLock for BoxedLockCollection<L> {
     }
 
     unsafe fn raw_lock(&self) {
+        let locks = self.locks();
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_locks_from_panic(&locked)
+        };
+
         for lock in self.locks() {
             lock.raw_lock();
+            locked.borrow_mut().push(*lock);
         }
     }
 
@@ -44,8 +51,15 @@ unsafe impl<L: Lockable + Send + Sync> RawLock for BoxedLockCollection<L> {
     }
 
     unsafe fn raw_read(&self) {
+        let locks = self.locks();
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_reads_from_panic(&locked)
+        };
+
         for lock in self.locks() {
             lock.raw_read();
+            locked.borrow_mut().push(*lock);
         }
     }
 
@@ -335,16 +349,16 @@ impl<L: Lockable> BoxedLockCollection<L> {
         &'g self,
         key: Key,
     ) -> LockGuard<'key, L::Guard<'g>, Key> {
-        for lock in self.locks() {
+        unsafe {
             // safety: we have the thread key
-            unsafe { lock.raw_lock() };
-        }
+            self.raw_lock();
 
-        LockGuard {
-            // safety: we've already acquired the lock
-            guard: unsafe { self.data().guard() },
-            key,
-            _phantom: PhantomData,
+            LockGuard {
+                // safety: we've already acquired the lock
+                guard: self.data().guard(),
+                key,
+                _phantom: PhantomData,
+            }
         }
     }
 
@@ -377,7 +391,7 @@ impl<L: Lockable> BoxedLockCollection<L> {
         key: Key,
     ) -> Option<LockGuard<'key, L::Guard<'g>, Key>> {
         let guard = unsafe {
-            if !utils::ordered_try_lock(self.locks()) {
+            if !self.raw_try_lock() {
                 return None;
             }
 
@@ -439,16 +453,16 @@ impl<L: Sharable> BoxedLockCollection<L> {
         &'g self,
         key: Key,
     ) -> LockGuard<'key, L::ReadGuard<'g>, Key> {
-        for lock in self.locks() {
+        unsafe {
             // safety: we have the thread key
-            unsafe { lock.raw_read() };
-        }
+            self.raw_read();
 
-        LockGuard {
-            // safety: we've already acquired the lock
-            guard: unsafe { self.data().read_guard() },
-            key,
-            _phantom: PhantomData,
+            LockGuard {
+                // safety: we've already acquired the lock
+                guard: self.data().read_guard(),
+                key,
+                _phantom: PhantomData,
+            }
         }
     }
 
@@ -482,7 +496,8 @@ impl<L: Sharable> BoxedLockCollection<L> {
         key: Key,
     ) -> Option<LockGuard<'key, L::ReadGuard<'g>, Key>> {
         let guard = unsafe {
-            if !utils::ordered_try_read(self.locks()) {
+            // safety: we have the thread key
+            if !self.raw_try_read() {
                 return None;
             }
 
diff --git a/src/collection/owned.rs b/src/collection/owned.rs
index 69347cc..3ea93b6 100644
--- a/src/collection/owned.rs
+++ b/src/collection/owned.rs
@@ -1,3 +1,4 @@
+use std::cell::RefCell;
 use std::marker::PhantomData;
 
 use crate::lockable::{Lockable, LockableIntoInner, OwnedLockable, RawLock, Sharable};
@@ -11,7 +12,7 @@ fn get_locks<L: Lockable>(data: &L) -> Vec<&dyn RawLock> {
     locks
 }
 
-unsafe impl<L: Lockable + Send + Sync> RawLock for OwnedLockCollection<L> {
+unsafe impl<L: Lockable> RawLock for OwnedLockCollection<L> {
     fn kill(&self) {
         let locks = get_locks(&self.data);
         for lock in locks {
@@ -21,8 +22,14 @@ unsafe impl<L: Lockable + Send + Sync> RawLock for OwnedLockCollection<L> {
 
     unsafe fn raw_lock(&self) {
         let locks = get_locks(&self.data);
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_locks_from_panic(&locked)
+        };
+
         for lock in locks {
             lock.raw_lock();
+            locked.borrow_mut().push(lock);
         }
     }
 
@@ -40,8 +47,14 @@ unsafe impl<L: Lockable + Send + Sync> RawLock for OwnedLockCollection<L> {
 
     unsafe fn raw_read(&self) {
         let locks = get_locks(&self.data);
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_reads_from_panic(&locked)
+        };
+
         for lock in locks {
             lock.raw_read();
+            locked.borrow_mut().push(lock);
         }
     }
 
@@ -205,15 +218,15 @@ impl<L: OwnedLockable> OwnedLockCollection<L> {
         &'g self,
         key: Key,
     ) -> LockGuard<'key, L::Guard<'g>, Key> {
-        let locks = get_locks(&self.data);
-        for lock in locks {
+        let guard = unsafe {
             // safety: we have the thread key, and these locks happen in a
             // predetermined order
-            unsafe { lock.raw_lock() };
-        }
+            self.raw_lock();
+
+            // safety: we've locked all of this already
+            self.data.guard()
+        };
 
-        // safety: we've locked all of this already
-        let guard = unsafe { self.data.guard() };
         LockGuard {
             guard,
             key,
@@ -250,9 +263,8 @@ impl<L: OwnedLockable> OwnedLockCollection<L> {
         &'g self,
         key: Key,
     ) -> Option<LockGuard<'key, L::Guard<'g>, Key>> {
-        let locks = get_locks(&self.data);
         let guard = unsafe {
-            if !utils::ordered_try_lock(&locks) {
+            if !self.raw_try_lock() {
                 return None;
             }
 
@@ -319,19 +331,16 @@ impl<L: Sharable> OwnedLockCollection<L> {
         &'g self,
         key: Key,
     ) -> LockGuard<'key, L::ReadGuard<'g>, Key> {
-        let locks = get_locks(&self.data);
-        for lock in locks {
-            // safety: we have the thread key, and these locks happen in a
-            // predetermined order
-            unsafe { lock.raw_read() };
-        }
-
-        // safety: we've locked all of this already
-        let guard = unsafe { self.data.read_guard() };
-        LockGuard {
-            guard,
-            key,
-            _phantom: PhantomData,
+        unsafe {
+            // safety: we have the thread key
+            self.raw_read();
+
+            LockGuard {
+                // safety: we've already acquired the lock
+                guard: self.data.read_guard(),
+                key,
+                _phantom: PhantomData,
+            }
         }
     }
 
@@ -365,9 +374,9 @@ impl<L: Sharable> OwnedLockCollection<L> {
         &'g self,
         key: Key,
     ) -> Option<LockGuard<'key, L::ReadGuard<'g>, Key>> {
-        let locks = get_locks(&self.data);
         let guard = unsafe {
-            if !utils::ordered_try_read(&locks) {
+            // safety: we have the thread key
+            if !self.raw_try_read() {
                 return None;
             }
 
diff --git a/src/collection/ref.rs b/src/collection/ref.rs
index 0e2f057..31ef173 100644
--- a/src/collection/ref.rs
+++ b/src/collection/ref.rs
@@ -1,3 +1,4 @@
+use std::cell::RefCell;
 use std::fmt::Debug;
 use std::marker::PhantomData;
 
@@ -44,7 +45,7 @@ where
     }
 }
 
-unsafe impl<L: Lockable + Send + Sync> RawLock for RefLockCollection<'_, L> {
+unsafe impl<L: Lockable> RawLock for RefLockCollection<'_, L> {
     fn kill(&self) {
         for lock in &self.locks {
             lock.kill();
@@ -52,8 +53,15 @@ unsafe impl<L: Lockable + Send + Sync> RawLock for RefLockCollection<'_, L> {
     }
 
     unsafe fn raw_lock(&self) {
+        let locks = &self.locks;
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_locks_from_panic(&locked)
+        };
+
         for lock in &self.locks {
             lock.raw_lock();
+            locked.borrow_mut().push(*lock);
         }
     }
 
@@ -68,8 +76,15 @@ unsafe impl<L: Lockable + Send + Sync> RawLock for RefLockCollection<'_, L> {
     }
 
     unsafe fn raw_read(&self) {
+        let locks = &self.locks;
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_reads_from_panic(&locked)
+        };
+
         for lock in &self.locks {
             lock.raw_read();
+            locked.borrow_mut().push(*lock);
         }
     }
 
@@ -229,14 +244,16 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> {
         &'a self,
         key: Key,
     ) -> LockGuard<'key, L::Guard<'a>, Key> {
-        for lock in &self.locks {
+        let guard = unsafe {
             // safety: we have the thread key
-            unsafe { lock.raw_lock() };
-        }
+            self.raw_lock();
+
+            // safety: we've locked all of this already
+            self.data.guard()
+        };
 
         LockGuard {
-            // safety: we've already acquired the lock
-            guard: unsafe { self.data.guard() },
+            guard,
             key,
             _phantom: PhantomData,
         }
@@ -272,7 +289,7 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> {
         key: Key,
     ) -> Option<LockGuard<'key, L::Guard<'a>, Key>> {
         let guard = unsafe {
-            if !utils::ordered_try_lock(&self.locks) {
+            if !self.raw_try_lock() {
                 return None;
             }
 
@@ -337,16 +354,16 @@ impl<'a, L: Sharable> RefLockCollection<'a, L> {
         &'a self,
         key: Key,
     ) -> LockGuard<'key, L::ReadGuard<'a>, Key> {
-        for lock in &self.locks {
+        unsafe {
             // safety: we have the thread key
-            unsafe { lock.raw_read() };
-        }
+            self.raw_read();
 
-        LockGuard {
-            // safety: we've already acquired the lock
-            guard: unsafe { self.data.read_guard() },
-            key,
-            _phantom: PhantomData,
+            LockGuard {
+                // safety: we've already acquired the lock
+                guard: self.data.read_guard(),
+                key,
+                _phantom: PhantomData,
+            }
         }
     }
 
@@ -381,7 +398,8 @@ impl<'a, L: Sharable> RefLockCollection<'a, L> {
         key: Key,
     ) -> Option<LockGuard<'key, L::ReadGuard<'a>, Key>> {
         let guard = unsafe {
-            if !utils::ordered_try_read(&self.locks) {
+            // safety: we have the thread key
+            if !self.raw_try_read() {
                 return None;
             }
 
diff --git a/src/collection/retry.rs b/src/collection/retry.rs
index 05adc3e..e5246cd 100644
--- a/src/collection/retry.rs
+++ b/src/collection/retry.rs
@@ -1,8 +1,10 @@
+use crate::collection::utils;
 use crate::lockable::{
     Lockable, LockableAsMut, LockableIntoInner, OwnedLockable, RawLock, Sharable,
 };
 use crate::Keyable;
 
+use std::cell::RefCell;
 use std::collections::HashSet;
 use std::marker::PhantomData;
 
@@ -47,6 +49,11 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
             return;
         }
 
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_locks_from_panic(&locked)
+        };
+
         unsafe {
             'outer: loop {
                 // safety: we have the thread key
@@ -61,7 +68,9 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
                     // it does return false, then the lock function is called
                     // immediately after, causing a panic
                     // safety: we have the thread key
-                    if !lock.raw_try_lock() {
+                    if lock.raw_try_lock() {
+                        locked.borrow_mut().push(*lock)
+                    } else {
                         for lock in locks.iter().take(i) {
                             // safety: we already locked all of these
                             lock.raw_unlock();
@@ -91,10 +100,17 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
             return true;
         }
 
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_locks_from_panic(&locked)
+        };
+
         unsafe {
             for (i, lock) in locks.iter().enumerate() {
                 // safety: we have the thread key
-                if !lock.raw_try_lock() {
+                if lock.raw_try_lock() {
+                    locked.borrow_mut().push(*lock);
+                } else {
                     for lock in locks.iter().take(i) {
                         // safety: we already locked all of these
                         lock.raw_unlock();
@@ -119,6 +135,15 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
         let mut first_index = 0;
         let locks = get_locks(&self.data);
 
+        if locks.is_empty() {
+            return;
+        }
+
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_reads_from_panic(&locked)
+        };
+
         'outer: loop {
             // safety: we have the thread key
             locks[first_index].raw_read();
@@ -128,7 +153,9 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
                 }
 
                 // safety: we have the thread key
-                if !lock.raw_try_read() {
+                if lock.raw_try_read() {
+                    locked.borrow_mut().push(*lock);
+                } else {
                     for lock in locks.iter().take(i) {
                         // safety: we already locked all of these
                         lock.raw_unlock_read();
@@ -154,10 +181,17 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
             return true;
         }
 
+        let locked = RefCell::new(Vec::with_capacity(locks.len()));
+        scopeguard::defer_on_unwind! {
+            utils::attempt_to_recover_reads_from_panic(&locked)
+        };
+
         unsafe {
             for (i, lock) in locks.iter().enumerate() {
                 // safety: we have the thread key
-                if !lock.raw_try_read() {
+                if lock.raw_try_read() {
+                    locked.borrow_mut().push(*lock);
+                } else {
                     for lock in locks.iter().take(i) {
                         // safety: we already locked all of these
                         lock.raw_unlock_read();
 
diff --git a/src/collection/utils.rs b/src/collection/utils.rs
index c114541..d845450 100644
--- a/src/collection/utils.rs
+++ b/src/collection/utils.rs
@@ -1,15 +1,23 @@
+use std::cell::RefCell;
+
 use crate::lockable::RawLock;
 
 /// Locks the locks in the order they are given. This causes deadlock if the
 /// locks contain duplicates, or if this is called by multiple threads with the
 /// locks in different orders.
 pub unsafe fn ordered_try_lock(locks: &[&dyn RawLock]) -> bool {
+    let locked = RefCell::new(Vec::with_capacity(locks.len()));
+    scopeguard::defer_on_unwind! {
+        // safety: everything in locked is locked
+        attempt_to_recover_locks_from_panic(&locked)
+    };
+
     unsafe {
         for (i, lock) in locks.iter().enumerate() {
             // safety: we have the thread key
-            let success = lock.raw_try_lock();
-
-            if !success {
+            if lock.raw_try_lock() {
+                locked.borrow_mut().push(*lock);
+            } else {
                 for lock in &locks[0..i] {
                     // safety: this lock was already acquired
                     lock.raw_unlock();
@@ -25,12 +33,18 @@ pub unsafe fn ordered_try_lock(locks: &[&dyn RawLock]) -> bool {
 /// Locks the locks in the order they are given. This causes deadlock if this
 /// is called by multiple threads with the locks in different orders.
 pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool {
+    let locked = RefCell::new(Vec::with_capacity(locks.len()));
+    scopeguard::defer_on_unwind! {
+        // safety: everything in locked is locked
+        attempt_to_recover_reads_from_panic(&locked)
+    };
+
     unsafe {
         for (i, lock) in locks.iter().enumerate() {
             // safety: we have the thread key
-            let success = lock.raw_try_read();
-
-            if !success {
+            if lock.raw_try_read() {
+                locked.borrow_mut().push(*lock);
+            } else {
                 for lock in &locks[0..i] {
                     // safety: this lock was already acquired
                     lock.raw_unlock_read();
@@ -42,3 +56,19 @@ pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool {
         true
     }
 }
+
+pub unsafe fn attempt_to_recover_locks_from_panic(locked: &RefCell<Vec<&dyn RawLock>>) {
+    scopeguard::defer_on_unwind! { locked.borrow().iter().for_each(|l| l.kill()); };
+    let mut locked = locked.borrow_mut();
+    while let Some(locked_lock) = locked.pop() {
+        locked_lock.raw_unlock();
+    }
+}
+
+pub unsafe fn attempt_to_recover_reads_from_panic(locked: &RefCell<Vec<&dyn RawLock>>) {
+    scopeguard::defer_on_unwind! { locked.borrow().iter().for_each(|l| l.kill()); };
+    let mut locked = locked.borrow_mut();
+    while let Some(locked_lock) = locked.pop() {
+        locked_lock.raw_unlock_read();
+    }
+}
diff --git a/src/mutex/mutex.rs b/src/mutex/mutex.rs
index 1130110..e3f5303 100644
--- a/src/mutex/mutex.rs
+++ b/src/mutex/mutex.rs
@@ -18,6 +18,18 @@ unsafe impl<T: ?Sized, R: RawMutex> RawLock for Mutex<T, R> {
     unsafe fn raw_lock(&self) {
         assert!(!self.poison.is_poisoned(), "The mutex has been killed");
 
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_lock() {
+                self.raw_unlock();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.lock()
     }
 
@@ -26,31 +38,49 @@ unsafe impl<T: ?Sized, R: RawMutex> RawLock for Mutex<T, R> {
             return false;
         }
 
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_lock() {
+                self.raw_unlock();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.try_lock()
     }
 
     unsafe fn raw_unlock(&self) {
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_lock() {
+                self.raw_unlock();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.unlock()
     }
 
     // this is the closest thing to a read we can get, but Sharable isn't
     // implemented for this
     unsafe fn raw_read(&self) {
-        assert!(!self.poison.is_poisoned(), "The mutex has been killed");
-
-        self.raw.lock()
+        self.raw_lock()
     }
 
     unsafe fn raw_try_read(&self) -> bool {
-        if self.poison.is_poisoned() {
-            return false;
-        }
-
-        self.raw.try_lock()
+        self.raw_try_lock()
     }
 
     unsafe fn raw_unlock_read(&self) {
-        self.raw.unlock()
+        self.raw_unlock()
     }
 }
diff --git a/src/rwlock/rwlock.rs b/src/rwlock/rwlock.rs
index 86005e3..94c6062 100644
--- a/src/rwlock/rwlock.rs
+++ b/src/rwlock/rwlock.rs
@@ -21,6 +21,18 @@ unsafe impl<T: ?Sized, R: RawRwLock> RawLock for RwLock<T, R> {
             "The read-write lock has been killed"
         );
 
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_lock() {
+                self.raw_unlock();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.lock_exclusive()
     }
 
@@ -29,10 +41,34 @@ unsafe impl<T: ?Sized, R: RawRwLock> RawLock for RwLock<T, R> {
             return false;
         }
 
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_lock() {
+                self.raw_unlock();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.try_lock_exclusive()
     }
 
     unsafe fn raw_unlock(&self) {
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_lock() {
+                self.raw_unlock();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.unlock_exclusive()
     }
 
@@ -42,6 +78,18 @@ unsafe impl<T: ?Sized, R: RawRwLock> RawLock for RwLock<T, R> {
             "The read-write lock has been killed"
         );
 
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_read() {
+                self.raw_unlock_read();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.lock_shared()
     }
 
@@ -50,10 +98,34 @@ unsafe impl<T: ?Sized, R: RawRwLock> RawLock for RwLock<T, R> {
             return false;
         }
 
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_read() {
+                self.raw_unlock_read();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.try_lock_shared()
     }
 
     unsafe fn raw_unlock_read(&self) {
+        scopeguard::defer_on_unwind! {
+            scopeguard::defer_on_unwind! { self.kill() };
+            if self.raw_try_read() {
+                self.raw_unlock_read();
+            } else {
+                // We don't know whether this lock is locked by the current
+                // thread, or another thread. There's not much we can do other
+                // than kill it.
+                self.kill();
+            }
+        }
+
         self.raw.unlock_shared()
     }
 }
```
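For the `Mutex` and `RwLock` themselves, each raw operation is wrapped in a nested guard: if a panic unwinds out of the underlying call, the lock probes its own state with a try-lock, undoing the probe on success and killing (poisoning) the lock otherwise, and the inner `defer_on_unwind!` kills unconditionally if even that recovery panics. A rough sketch of that decision tree, with a toy atomic-flag mutex standing in for the real raw backend; `KillableMutex` and its `poisoned` field are illustrative names (the latter models the diff's `self.poison`), not this crate's API.

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Toy stand-in for the crate's Mutex: one flag for the lock state, one for
// the poison ("death") state.
struct KillableMutex {
    locked: AtomicBool,
    poisoned: AtomicBool,
}

impl KillableMutex {
    fn kill(&self) {
        self.poisoned.store(true, Ordering::Release);
    }

    fn raw_try_lock(&self) -> bool {
        !self.locked.swap(true, Ordering::Acquire)
    }

    fn raw_unlock(&self) {
        self.locked.store(false, Ordering::Release);
    }

    fn raw_lock(&self) {
        assert!(
            !self.poisoned.load(Ordering::Acquire),
            "the mutex has been killed"
        );

        // Fires only if a panic unwinds through the acquisition below.
        scopeguard::defer_on_unwind! {
            // Last resort: if the recovery itself panics, poison the lock.
            scopeguard::defer_on_unwind! { self.kill() };
            if self.raw_try_lock() {
                // The lock was free, so the panicking thread cannot have
                // been holding it; undo the probe and leave it usable.
                self.raw_unlock();
            } else {
                // Held by this thread or by another; we cannot tell which,
                // so the only safe option is to kill it.
                self.kill();
            }
        }

        // Stand-in for the real backend's lock call, which as an arbitrary
        // trait implementation is allowed to panic.
        while !self.raw_try_lock() {
            std::hint::spin_loop();
        }
    }
}

fn main() {
    let m = KillableMutex {
        locked: AtomicBool::new(false),
        poisoned: AtomicBool::new(false),
    };
    m.raw_lock();
    m.raw_unlock();
}
```

The probe can distinguish a free lock from a held one, but not whether the panicking thread or some other thread holds it, so poisoning is the conservative choice; presumably this is the "mutex death" the commit title utilizes. In the diff the same guard also wraps the unlock paths, so a panicking unlock poisons the lock as well.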
