diff options
Diffstat (limited to 'src/collection')
-rw-r--r--  src/collection/boxed.rs | 9
-rw-r--r--  src/collection/guard.rs | 5
-rw-r--r--  src/collection/owned.rs | 4
-rw-r--r--  src/collection/ref.rs   | 6
-rw-r--r--  src/collection/retry.rs | 4
-rw-r--r--  src/collection/utils.rs | 29
6 files changed, 42 insertions(+), 15 deletions(-)
diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs index 1891119..0a30eac 100644 --- a/src/collection/boxed.rs +++ b/src/collection/boxed.rs @@ -23,7 +23,6 @@ unsafe impl<L: Lockable> RawLock for BoxedLockCollection<L> { } unsafe fn raw_try_write(&self) -> bool { - println!("{}", self.locks().len()); utils::ordered_try_write(self.locks()) } @@ -60,7 +59,10 @@ unsafe impl<L: Lockable> Lockable for BoxedLockCollection<L> { Self: 'a; fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { - ptrs.push(self); + // Doing it this way means that if a boxed collection is put inside a + // different collection, it will use the other method of locking. However, + // this prevents duplicate locks in a collection. + ptrs.extend_from_slice(&self.locks); } unsafe fn guard(&self) -> Self::Guard<'_> { @@ -170,6 +172,7 @@ impl<L: Debug> Debug for BoxedLockCollection<L> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct(stringify!(BoxedLockCollection)) .field("data", &self.data) + // there's not much reason to show the sorted locks .finish_non_exhaustive() } } @@ -331,7 +334,7 @@ impl<L: Lockable> BoxedLockCollection<L> { // cast to *const () because fat pointers can't be converted to usize locks.sort_by_key(|lock| (&raw const **lock).cast::<()>() as usize); - // safety we're just changing the lifetimes + // safety: we're just changing the lifetimes let locks: Vec<&'static dyn RawLock> = std::mem::transmute(locks); let data = &raw const *data; Self { data, locks } diff --git a/src/collection/guard.rs b/src/collection/guard.rs index 78d9895..ab66ffe 100644 --- a/src/collection/guard.rs +++ b/src/collection/guard.rs @@ -12,6 +12,11 @@ impl<Guard: Hash> Hash for LockGuard<Guard> { } } +// No implementations of Eq, PartialEq, PartialOrd, or Ord +// You can't implement both PartialEq<Self> and PartialEq<T> +// It's easier to just implement neither and ask users to dereference +// This is less of a problem when using the scoped lock API + 
#[mutants::skip] #[cfg(not(tarpaulin_include))] impl<Guard: Debug> Debug for LockGuard<Guard> { diff --git a/src/collection/owned.rs b/src/collection/owned.rs index 68170d1..866d778 100644 --- a/src/collection/owned.rs +++ b/src/collection/owned.rs @@ -63,6 +63,9 @@ unsafe impl<L: Lockable> Lockable for OwnedLockCollection<L> { #[mutants::skip] // It's hard to test locks in an OwnedLockCollection, because they're owned #[cfg(not(tarpaulin_include))] fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { + // It's ok to use self here, because the values in the collection already + // cannot be referenced anywhere else. It's necessary to use self as the lock + // because otherwise we will be handing out shared references to the child ptrs.push(self) } @@ -263,6 +266,7 @@ impl<L: OwnedLockable> OwnedLockCollection<L> { /// ``` pub fn try_lock(&self, key: ThreadKey) -> Result<LockGuard<L::Guard<'_>>, ThreadKey> { let guard = unsafe { + // safety: we've acquired the key if !self.raw_try_write() { return Err(key); } diff --git a/src/collection/ref.rs b/src/collection/ref.rs index 5f96533..e71624d 100644 --- a/src/collection/ref.rs +++ b/src/collection/ref.rs @@ -71,7 +71,9 @@ unsafe impl<L: Lockable> Lockable for RefLockCollection<'_, L> { Self: 'a; fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { - ptrs.push(self) + // Just like with BoxedLockCollection, we need to return all the individual + // locks to avoid duplicates + ptrs.extend_from_slice(&self.locks); } unsafe fn guard(&self) -> Self::Guard<'_> { @@ -115,6 +117,7 @@ impl<L: Debug> Debug for RefLockCollection<'_, L> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct(stringify!(RefLockCollection)) .field("data", self.data) + // there's not much reason to show the sorting order .finish_non_exhaustive() } } @@ -314,6 +317,7 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> { /// ``` pub fn try_lock(&self, key: ThreadKey) -> Result<LockGuard<L::Guard<'_>>, 
ThreadKey> { let guard = unsafe { + // safety: we have the thread key if !self.raw_try_write() { return Err(key); } diff --git a/src/collection/retry.rs b/src/collection/retry.rs index 70e5183..15f626d 100644 --- a/src/collection/retry.rs +++ b/src/collection/retry.rs @@ -244,7 +244,9 @@ unsafe impl<L: Lockable> Lockable for RetryingLockCollection<L> { Self: 'a; fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { - ptrs.push(self) + // this collection, just like the sorting collection, must return all of its + // locks in order to check for duplication + self.data.get_ptrs(ptrs) } unsafe fn guard(&self) -> Self::Guard<'_> { diff --git a/src/collection/utils.rs b/src/collection/utils.rs index 59a68da..71a023e 100644 --- a/src/collection/utils.rs +++ b/src/collection/utils.rs @@ -4,14 +4,17 @@ use crate::handle_unwind::handle_unwind; use crate::lockable::{Lockable, RawLock, Sharable}; use crate::Keyable; +/// Returns a list of locks in the given collection and sorts them by their +/// memory address #[must_use] pub fn get_locks<L: Lockable>(data: &L) -> Vec<&dyn RawLock> { - let mut locks = Vec::new(); - data.get_ptrs(&mut locks); + let mut locks = get_locks_unsorted(data); locks.sort_by_key(|lock| &raw const **lock); locks } +/// Returns a list of locks from the data. Unlike the above function, this does +/// not do any sorting of the locks. 
#[must_use] pub fn get_locks_unsorted<L: Lockable>(data: &L) -> Vec<&dyn RawLock> { let mut locks = Vec::new(); @@ -121,7 +124,7 @@ pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool { ) } -pub fn scoped_write<'a, L: RawLock + Lockable, R>( +pub fn scoped_write<'a, L: RawLock + Lockable + ?Sized, R>( collection: &'a L, key: impl Keyable, f: impl FnOnce(L::DataMut<'a>) -> R, @@ -131,7 +134,10 @@ pub fn scoped_write<'a, L: RawLock + Lockable, R>( collection.raw_write(); // safety: we just locked this - let r = f(collection.data_mut()); + let r = handle_unwind( + || f(collection.data_mut()), + || collection.raw_unlock_write(), + ); // this ensures the key is held long enough drop(key); @@ -143,7 +149,7 @@ pub fn scoped_write<'a, L: RawLock + Lockable, R>( } } -pub fn scoped_try_write<'a, L: RawLock + Lockable, Key: Keyable, R>( +pub fn scoped_try_write<'a, L: RawLock + Lockable + ?Sized, Key: Keyable, R>( collection: &'a L, key: Key, f: impl FnOnce(L::DataMut<'a>) -> R, @@ -155,7 +161,10 @@ pub fn scoped_try_write<'a, L: RawLock + Lockable, Key: Keyable, R>( } // safety: we just locked this - let r = f(collection.data_mut()); + let r = handle_unwind( + || f(collection.data_mut()), + || collection.raw_unlock_write(), + ); // this ensures the key is held long enough drop(key); @@ -167,7 +176,7 @@ pub fn scoped_try_write<'a, L: RawLock + Lockable, Key: Keyable, R>( } } -pub fn scoped_read<'a, L: RawLock + Sharable, R>( +pub fn scoped_read<'a, L: RawLock + Sharable + ?Sized, R>( collection: &'a L, key: impl Keyable, f: impl FnOnce(L::DataRef<'a>) -> R, @@ -177,7 +186,7 @@ pub fn scoped_read<'a, L: RawLock + Sharable, R>( collection.raw_read(); // safety: we just locked this - let r = f(collection.data_ref()); + let r = handle_unwind(|| f(collection.data_ref()), || collection.raw_unlock_read()); // this ensures the key is held long enough drop(key); @@ -189,7 +198,7 @@ pub fn scoped_read<'a, L: RawLock + Sharable, R>( } } -pub fn scoped_try_read<'a, L: RawLock 
+ Sharable, Key: Keyable, R>( +pub fn scoped_try_read<'a, L: RawLock + Sharable + ?Sized, Key: Keyable, R>( collection: &'a L, key: Key, f: impl FnOnce(L::DataRef<'a>) -> R, @@ -201,7 +210,7 @@ pub fn scoped_try_read<'a, L: RawLock + Sharable, Key: Keyable, R>( } // safety: we just locked this - let r = f(collection.data_ref()); + let r = handle_unwind(|| f(collection.data_ref()), || collection.raw_unlock_read()); // this ensures the key is held long enough drop(key); |
