From 7bd236853ef5ae705328c8fdc492cf60fc6887c1 Mon Sep 17 00:00:00 2001 From: Mica White Date: Wed, 13 Mar 2024 22:44:46 -0400 Subject: Lockable overhaul --- src/rwlock/write_lock.rs | 6 ------ 1 file changed, 6 deletions(-) (limited to 'src/rwlock/write_lock.rs') diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs index d7333ae..c6b4c24 100644 --- a/src/rwlock/write_lock.rs +++ b/src/rwlock/write_lock.rs @@ -67,12 +67,6 @@ impl<'a, T: ?Sized, R: RawRwLock> WriteLock<'a, T, R> { self.0.write(key) } - /// Creates an exclusive lock without a key. Locking this without exclusive - /// access to the key is undefined behavior. - pub(crate) unsafe fn lock_no_key(&self) -> RwLockWriteRef<'_, T, R> { - self.0.write_no_key() - } - /// Attempts to lock the underlying [`RwLock`] with exclusive write access. pub fn try_lock<'s, 'key: 's, Key: Keyable + 'key>( &'s self, -- cgit v1.2.3 From cb28fb1ff3b5ea71c6fe11956015c7285cb3f3df Mon Sep 17 00:00:00 2001 From: Mica White Date: Tue, 21 May 2024 13:17:38 -0400 Subject: read-lock changes --- README.md | 6 +----- src/collection.rs | 2 +- src/lockable.rs | 4 ++-- src/rwlock.rs | 6 ++++-- src/rwlock/read_lock.rs | 16 ++++++++-------- src/rwlock/write_lock.rs | 16 ++++++++-------- 6 files changed, 24 insertions(+), 26 deletions(-) (limited to 'src/rwlock/write_lock.rs') diff --git a/README.md b/README.md index 51d8d67..66a4fc0 100644 --- a/README.md +++ b/README.md @@ -73,14 +73,10 @@ println!("{}", *data.1); **Avoid `LockCollection::try_new`.** This constructor will check to make sure that the collection contains no duplicate locks. This is an O(n^2) operation, where n is the number of locks in the collection. `LockCollection::new` and `LockCollection::new_ref` don't need these checks because they use `OwnedLockable`, which is guaranteed to be unique as long as it is accessible. As a last resort, `LockCollection::new_unchecked` doesn't do this check, but is unsafe to call. 
-**Avoid using distinct lock orders for `LockCollection`.** The problem is that this library must iterate through the list of locks, and not complete until every single one of them is unlocked. This also means that attempting to lock multiple mutexes gives you a lower chance of ever running. Only one needs to be locked for the operation to need a reset. This problem can be prevented by not doing that in your code. Resources should be obtained in the same order on every thread. - -**Avoid tuples in `LockCollection`.** Tuples become spinlocks if the first value is already unlocked. This will be fixed in the future. For now, if you need a tuple, make the lock that is most likely to be locked the first element. +**Avoid using distinct lock orders for `RetryingLockCollection`.** The problem is that this collection must iterate through the list of locks, and not complete until every single one of them is unlocked. This also means that attempting to lock multiple mutexes gives you a lower chance of ever running. Only one needs to be locked for the operation to need a reset. This problem can be prevented by not doing that in your code. Resources should be obtained in the same order on every thread. ## Future Work -Although this library is able to successfully prevent deadlocks, livelocks may still be an issue. Imagine thread 1 gets resource 1, thread 2 gets resource 2, thread 1 realizes it can't get resource 2, thread 2 realizes it can't get resource 1, thread 1 drops resource 1, thread 2 drops resource 2, and then repeat forever. In practice, this situation probably wouldn't last forever. But it would be nice if this could be prevented somehow. A more fair system for getting sets of locks would help, but I have no clue what that looks like. - It might to possible to break the `ThreadKey` system by having two crates import this crate and call `ThreadKey::get`. 
I'm not quite sure how this works, but Rust could decide to give each crate their own key, ergo one thread would get two keys. I don't think the standard library would have this issue. At a certain point, I have to recognize that someone could also just import the standard library mutex and get a deadlock that way. We should add `Condvar` at some point. I didn't because I've never used it before, and I'm probably not the right person to solve this problem. I think all the synchronization problems could be solved by having `Condvar::wait` take a `ThreadKey` instead of a `MutexGuard`. Something similar can probably be done for `Barrier`. But again, I'm no expert. diff --git a/src/collection.rs b/src/collection.rs index d9c56d3..a11d60c 100644 --- a/src/collection.rs +++ b/src/collection.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData}; +use std::marker::PhantomData; use crate::{ key::Keyable, diff --git a/src/lockable.rs b/src/lockable.rs index fe14e8c..a5646e1 100644 --- a/src/lockable.rs +++ b/src/lockable.rs @@ -124,7 +124,7 @@ unsafe impl OwnedLockable for Mutex {} unsafe impl OwnedLockable for RwLock {} -unsafe impl<'r, T: Send + 'r, R: RawRwLock + Send + Sync + 'r> Lockable for ReadLock<'r, T, R> { +unsafe impl Lockable for ReadLock { type Guard<'g> = RwLockReadRef<'g, T, R> where Self: 'g; fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { @@ -136,7 +136,7 @@ unsafe impl<'r, T: Send + 'r, R: RawRwLock + Send + Sync + 'r> Lockable for Read } } -unsafe impl<'r, T: Send + 'r, R: RawRwLock + Send + Sync + 'r> Lockable for WriteLock<'r, T, R> { +unsafe impl Lockable for WriteLock { type Guard<'g> = RwLockWriteRef<'g, T, R> where Self: 'g; fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { diff --git a/src/rwlock.rs b/src/rwlock.rs index 7fb8c7a..40c5a6e 100644 --- a/src/rwlock.rs +++ b/src/rwlock.rs @@ -56,7 +56,8 @@ pub struct RwLock { /// that only read access is needed to the data. 
/// /// [`LockCollection`]: `crate::LockCollection` -pub struct ReadLock<'a, T: ?Sized, R>(&'a RwLock); +#[repr(transparent)] +pub struct ReadLock(RwLock); /// Grants write access to an [`RwLock`] /// @@ -64,7 +65,8 @@ pub struct ReadLock<'a, T: ?Sized, R>(&'a RwLock); /// that write access is needed to the data. /// /// [`LockCollection`]: `crate::LockCollection` -pub struct WriteLock<'a, T: ?Sized, R>(&'a RwLock); +#[repr(transparent)] +pub struct WriteLock(RwLock); /// RAII structure that unlocks the shared read access to a [`RwLock`] pub struct RwLockReadRef<'a, T: ?Sized, R: RawRwLock>( diff --git a/src/rwlock/read_lock.rs b/src/rwlock/read_lock.rs index 133ca7d..a8bb9be 100644 --- a/src/rwlock/read_lock.rs +++ b/src/rwlock/read_lock.rs @@ -6,7 +6,7 @@ use crate::key::Keyable; use super::{ReadLock, RwLock, RwLockReadGuard, RwLockReadRef}; -impl<'a, T: ?Sized + Debug, R: RawRwLock> Debug for ReadLock<'a, T, R> { +impl Debug for ReadLock { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // safety: this is just a try lock, and the value is dropped // immediately after, so there's no risk of blocking ourselves @@ -28,19 +28,19 @@ impl<'a, T: ?Sized + Debug, R: RawRwLock> Debug for ReadLock<'a, T, R> { } } -impl<'a, T: ?Sized, R> From<&'a RwLock> for ReadLock<'a, T, R> { - fn from(value: &'a RwLock) -> Self { +impl From> for ReadLock { + fn from(value: RwLock) -> Self { Self::new(value) } } -impl<'a, T: ?Sized, R> AsRef> for ReadLock<'a, T, R> { +impl AsRef> for ReadLock { fn as_ref(&self) -> &RwLock { - self.0 + &self.0 } } -impl<'a, T: ?Sized, R> ReadLock<'a, T, R> { +impl ReadLock { /// Creates a new `ReadLock` which accesses the given [`RwLock`] /// /// # Examples @@ -52,12 +52,12 @@ impl<'a, T: ?Sized, R> ReadLock<'a, T, R> { /// let read_lock = ReadLock::new(&lock); /// ``` #[must_use] - pub const fn new(rwlock: &'a RwLock) -> Self { + pub const fn new(rwlock: RwLock) -> Self { Self(rwlock) } } -impl<'a, T: ?Sized, R: RawRwLock> 
ReadLock<'a, T, R> { +impl ReadLock { /// Locks the underlying [`RwLock`] with shared read access, blocking the /// current thread until it can be acquired. pub fn lock<'s, 'key: 's, Key: Keyable + 'key>( diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs index c6b4c24..a344125 100644 --- a/src/rwlock/write_lock.rs +++ b/src/rwlock/write_lock.rs @@ -6,7 +6,7 @@ use crate::key::Keyable; use super::{RwLock, RwLockWriteGuard, RwLockWriteRef, WriteLock}; -impl<'a, T: ?Sized + Debug, R: RawRwLock> Debug for WriteLock<'a, T, R> { +impl Debug for WriteLock { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // safety: this is just a try lock, and the value is dropped // immediately after, so there's no risk of blocking ourselves @@ -28,19 +28,19 @@ impl<'a, T: ?Sized + Debug, R: RawRwLock> Debug for WriteLock<'a, T, R> { } } -impl<'a, T: ?Sized, R> From<&'a RwLock> for WriteLock<'a, T, R> { - fn from(value: &'a RwLock) -> Self { +impl From> for WriteLock { + fn from(value: RwLock) -> Self { Self::new(value) } } -impl<'a, T: ?Sized, R> AsRef> for WriteLock<'a, T, R> { +impl AsRef> for WriteLock { fn as_ref(&self) -> &RwLock { - self.0 + &self.0 } } -impl<'a, T: ?Sized, R> WriteLock<'a, T, R> { +impl WriteLock { /// Creates a new `WriteLock` which accesses the given [`RwLock`] /// /// # Examples @@ -52,12 +52,12 @@ impl<'a, T: ?Sized, R> WriteLock<'a, T, R> { /// let write_lock = WriteLock::new(&lock); /// ``` #[must_use] - pub const fn new(rwlock: &'a RwLock) -> Self { + pub const fn new(rwlock: RwLock) -> Self { Self(rwlock) } } -impl<'a, T: ?Sized, R: RawRwLock> WriteLock<'a, T, R> { +impl WriteLock { /// Locks the underlying [`RwLock`] with exclusive write access, blocking /// the current until it can be acquired. 
pub fn lock<'s, 'key: 's, Key: Keyable + 'key>( -- cgit v1.2.3 From a4625296cb98a68a590ae1aa78b07f190a850f37 Mon Sep 17 00:00:00 2001 From: Botahamec Date: Tue, 21 May 2024 13:39:57 -0400 Subject: fix errors --- src/key.rs | 2 +- src/lib.rs | 8 ++++---- src/rwlock.rs | 6 +++--- src/rwlock/read_lock.rs | 12 ++++++------ src/rwlock/rwlock.rs | 2 +- src/rwlock/write_lock.rs | 10 +++++----- 6 files changed, 20 insertions(+), 20 deletions(-) (limited to 'src/rwlock/write_lock.rs') diff --git a/src/key.rs b/src/key.rs index 1cfa209..875f4be 100644 --- a/src/key.rs +++ b/src/key.rs @@ -20,7 +20,7 @@ static KEY: Lazy> = Lazy::new(ThreadLocal::new); /// The key for the current thread. /// /// Only one of these exist per thread. To get the current thread's key, call -/// [`ThreadKey::get`]. If the `ThreadKey` is dropped, it can be reobtained. +/// [`ThreadKey::get`]. If the `ThreadKey` is dropped, it can be re-obtained. pub struct ThreadKey { phantom: PhantomData<*const ()>, // implement !Send and !Sync } diff --git a/src/lib.rs b/src/lib.rs index 92b31a0..7e7930f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,10 +22,10 @@ //! 4. **partial allocation** //! //! This library seeks to solve **partial allocation** by requiring total -//! allocation. All of the resources a thread needs must be allocated at the -//! same time. In order to request new resources, the old resources must be -//! dropped first. Requesting multiple resources at once is atomic. You either -//! get all of the requested resources or none at all. +//! allocation. All the resources a thread needs must be allocated at the same +//! time. In order to request new resources, the old resources must be dropped +//! first. Requesting multiple resources at once is atomic. You either get all +//! the requested resources or none at all. //! //! # Performance //! 
diff --git a/src/rwlock.rs b/src/rwlock.rs index 40c5a6e..8f1ba8f 100644 --- a/src/rwlock.rs +++ b/src/rwlock.rs @@ -22,7 +22,7 @@ pub type ParkingRwLock = RwLock; /// A reader-writer lock /// /// This type of lock allows a number of readers or at most one writer at any -/// point in time. The write portion of thislock typically allows modification +/// point in time. The write portion of this lock typically allows modification /// of the underlying data (exclusive access) and the read portion of this lock /// typically allows for read-only access (shared access). /// @@ -57,7 +57,7 @@ pub struct RwLock { /// /// [`LockCollection`]: `crate::LockCollection` #[repr(transparent)] -pub struct ReadLock(RwLock); +pub struct ReadLock<'l, T: ?Sized, R>(&'l RwLock); /// Grants write access to an [`RwLock`] /// @@ -66,7 +66,7 @@ pub struct ReadLock(RwLock); /// /// [`LockCollection`]: `crate::LockCollection` #[repr(transparent)] -pub struct WriteLock(RwLock); +pub struct WriteLock<'l, T: ?Sized, R>(&'l RwLock); /// RAII structure that unlocks the shared read access to a [`RwLock`] pub struct RwLockReadRef<'a, T: ?Sized, R: RawRwLock>( diff --git a/src/rwlock/read_lock.rs b/src/rwlock/read_lock.rs index a8bb9be..011bd8c 100644 --- a/src/rwlock/read_lock.rs +++ b/src/rwlock/read_lock.rs @@ -28,8 +28,8 @@ impl Debug for ReadLock { } } -impl From> for ReadLock { - fn from(value: RwLock) -> Self { +impl<'l, T, R> From<&'l RwLock> for ReadLock<'l, T, R> { + fn from(value: &'l RwLock) -> Self { Self::new(value) } } @@ -40,7 +40,7 @@ impl AsRef> for ReadLock { } } -impl ReadLock { +impl<'l, T, R> ReadLock<'l, T, R> { /// Creates a new `ReadLock` which accesses the given [`RwLock`] /// /// # Examples @@ -52,12 +52,12 @@ impl ReadLock { /// let read_lock = ReadLock::new(&lock); /// ``` #[must_use] - pub const fn new(rwlock: RwLock) -> Self { + pub const fn new(rwlock: &'l RwLock) -> Self { Self(rwlock) } } -impl ReadLock { +impl<'l, T: ?Sized, R: RawRwLock> ReadLock<'l, T, R> { /// 
Locks the underlying [`RwLock`] with shared read access, blocking the /// current thread until it can be acquired. pub fn lock<'s, 'key: 's, Key: Keyable + 'key>( @@ -82,7 +82,7 @@ impl ReadLock { self.0.try_read_no_key() } - /// Immediately drops the guard, and consequentlyreleases the shared lock + /// Immediately drops the guard, and consequently releases the shared lock /// on the underlying [`RwLock`]. pub fn unlock<'key, Key: Keyable + 'key>(guard: RwLockReadGuard<'_, 'key, T, Key, R>) -> Key { RwLock::unlock_read(guard) diff --git a/src/rwlock/rwlock.rs b/src/rwlock/rwlock.rs index d16befe..7070b0e 100644 --- a/src/rwlock/rwlock.rs +++ b/src/rwlock/rwlock.rs @@ -254,7 +254,7 @@ impl RwLock { /// Attempts to lock this `RwLock` with exclusive write access. /// /// This function does not block. If the lock could not be acquired at this - /// time, then `None` is returned. Otherwise an RAII guard is returned + /// time, then `None` is returned. Otherwise, an RAII guard is returned /// which will release the lock when it is dropped. 
/// /// This function does not provide any guarantees with respect to the diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs index a344125..1f7112a 100644 --- a/src/rwlock/write_lock.rs +++ b/src/rwlock/write_lock.rs @@ -21,15 +21,15 @@ impl Debug for WriteLock { } } - f.debug_struct("ReadLock") + f.debug_struct("WriteLock") .field("data", &LockedPlaceholder) .finish() } } } -impl From> for WriteLock { - fn from(value: RwLock) -> Self { +impl<'l, T, R> From<&'l RwLock> for WriteLock<'l, T, R> { + fn from(value: &'l RwLock) -> Self { Self::new(value) } } @@ -40,7 +40,7 @@ impl AsRef> for WriteLock { } } -impl WriteLock { +impl<'l, T, R> WriteLock<'l, T, R> { /// Creates a new `WriteLock` which accesses the given [`RwLock`] /// /// # Examples @@ -52,7 +52,7 @@ impl WriteLock { /// let write_lock = WriteLock::new(&lock); /// ``` #[must_use] - pub const fn new(rwlock: RwLock) -> Self { + pub const fn new(rwlock: &'l RwLock) -> Self { Self(rwlock) } } -- cgit v1.2.3 From cf49f2900fe3c7abd1bbadacfdc745d6b5bbc235 Mon Sep 17 00:00:00 2001 From: Botahamec Date: Tue, 21 May 2024 14:48:46 -0400 Subject: Fix the Dining Philosophers Problem --- examples/dining_philosophers.rs | 8 ++++++-- src/lockable.rs | 16 ++++------------ src/rwlock/read_lock.rs | 4 ++-- src/rwlock/write_lock.rs | 8 ++++---- 4 files changed, 16 insertions(+), 20 deletions(-) (limited to 'src/rwlock/write_lock.rs') diff --git a/examples/dining_philosophers.rs b/examples/dining_philosophers.rs index 2f2fa0d..1340564 100644 --- a/examples/dining_philosophers.rs +++ b/examples/dining_philosophers.rs @@ -61,9 +61,13 @@ impl Philosopher { } fn main() { - let handles = PHILOSOPHERS + let handles: Vec<_> = PHILOSOPHERS .iter() - .map(|philosopher| thread::spawn(move || philosopher.cycle())); + .map(|philosopher| thread::spawn(move || philosopher.cycle())) + // The `collect` is absolutely necessary, because we're using lazy + // iterators. 
If `collect` isn't used, then the thread won't spawn + // until we try to join on it. + .collect(); for handle in handles { _ = handle.join(); diff --git a/src/lockable.rs b/src/lockable.rs index a5646e1..9b3a4e4 100644 --- a/src/lockable.rs +++ b/src/lockable.rs @@ -124,7 +124,7 @@ unsafe impl OwnedLockable for Mutex {} unsafe impl OwnedLockable for RwLock {} -unsafe impl Lockable for ReadLock { +unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for ReadLock<'l, T, R> { type Guard<'g> = RwLockReadRef<'g, T, R> where Self: 'g; fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { @@ -136,7 +136,7 @@ unsafe impl Lockable for ReadLock { } } -unsafe impl Lockable for WriteLock { +unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for WriteLock<'l, T, R> { type Guard<'g> = RwLockWriteRef<'g, T, R> where Self: 'g; fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { @@ -342,19 +342,12 @@ unsafe impl OwnedLockable for (A, B, C, D, E) +unsafe impl + OwnedLockable for (A, B, C, D, E) { } unsafe impl< - 'a, A: OwnedLockable, B: OwnedLockable, C: OwnedLockable, @@ -366,7 +359,6 @@ unsafe impl< } unsafe impl< - 'a, A: OwnedLockable, B: OwnedLockable, C: OwnedLockable, diff --git a/src/rwlock/read_lock.rs b/src/rwlock/read_lock.rs index 011bd8c..29042b5 100644 --- a/src/rwlock/read_lock.rs +++ b/src/rwlock/read_lock.rs @@ -6,7 +6,7 @@ use crate::key::Keyable; use super::{ReadLock, RwLock, RwLockReadGuard, RwLockReadRef}; -impl Debug for ReadLock { +impl<'l, T: ?Sized + Debug, R: RawRwLock> Debug for ReadLock<'l, T, R> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // safety: this is just a try lock, and the value is dropped // immediately after, so there's no risk of blocking ourselves @@ -34,7 +34,7 @@ impl<'l, T, R> From<&'l RwLock> for ReadLock<'l, T, R> { } } -impl AsRef> for ReadLock { +impl<'l, T: ?Sized, R> AsRef> for ReadLock<'l, T, R> { fn as_ref(&self) -> &RwLock { &self.0 } diff --git a/src/rwlock/write_lock.rs 
b/src/rwlock/write_lock.rs index 1f7112a..8501cd8 100644 --- a/src/rwlock/write_lock.rs +++ b/src/rwlock/write_lock.rs @@ -6,7 +6,7 @@ use crate::key::Keyable; use super::{RwLock, RwLockWriteGuard, RwLockWriteRef, WriteLock}; -impl Debug for WriteLock { +impl<'l, T: ?Sized + Debug, R: RawRwLock> Debug for WriteLock<'l, T, R> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // safety: this is just a try lock, and the value is dropped // immediately after, so there's no risk of blocking ourselves @@ -34,9 +34,9 @@ impl<'l, T, R> From<&'l RwLock> for WriteLock<'l, T, R> { } } -impl AsRef> for WriteLock { +impl<'l, T: ?Sized, R> AsRef> for WriteLock<'l, T, R> { fn as_ref(&self) -> &RwLock { - &self.0 + self.0 } } @@ -57,7 +57,7 @@ impl<'l, T, R> WriteLock<'l, T, R> { } } -impl WriteLock { +impl<'l, T: ?Sized, R: RawRwLock> WriteLock<'l, T, R> { /// Locks the underlying [`RwLock`] with exclusive write access, blocking /// the current until it can be acquired. pub fn lock<'s, 'key: 's, Key: Keyable + 'key>( -- cgit v1.2.3 From f81d4b40a007fecf6502a36b4c24a1e31807a731 Mon Sep 17 00:00:00 2001 From: Botahamec Date: Thu, 23 May 2024 19:50:32 -0400 Subject: Comments --- src/collection.rs | 20 +++++++++++++++++++- src/collection/boxed.rs | 30 +++++++----------------------- src/collection/owned.rs | 28 +++++----------------------- src/collection/ref.rs | 28 +++++----------------------- src/collection/utils.rs | 44 ++++++++++++++++++++++++++++++++++++++++++++ src/key.rs | 7 +++++++ src/lib.rs | 3 +++ src/lockable.rs | 28 ++++++++++++++++++++++++++-- src/mutex.rs | 5 ++++- src/mutex/guard.rs | 10 ++++++++++ src/mutex/mutex.rs | 4 ++++ src/rwlock/write_lock.rs | 11 +++++------ 12 files changed, 139 insertions(+), 79 deletions(-) create mode 100644 src/collection/utils.rs (limited to 'src/rwlock/write_lock.rs') diff --git a/src/collection.rs b/src/collection.rs index 5dc6946..27ec1c4 100644 --- a/src/collection.rs +++ b/src/collection.rs @@ -7,6 +7,7 @@ mod 
guard; mod owned; mod r#ref; mod retry; +mod utils; /// Locks a collection of locks, which cannot be shared immutably. /// @@ -24,6 +25,9 @@ mod retry; /// /// [`Lockable`]: `crate::lockable::Lockable` /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` + +// this type caches the idea that no immutable references to the underlying +// collection exist #[derive(Debug)] pub struct OwnedLockCollection { data: L, @@ -47,6 +51,13 @@ pub struct OwnedLockCollection { /// /// [`Lockable`]: `crate::lockable::Lockable` /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` + +// This type was born when I eventually realized that I needed a self +// referential structure. That used boxing, so I elected to make a more +// efficient implementation (polonius please save us) + +// This type caches the sorting order of the locks and the fact that it doesn't +// contain any duplicates. pub struct RefLockCollection<'a, L> { data: &'a L, locks: Vec<&'a dyn RawLock>, @@ -70,9 +81,14 @@ pub struct RefLockCollection<'a, L> { /// /// [`Lockable`]: `crate::lockable::Lockable` /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` + +// This type caches the sorting order of the locks and the fact that it doesn't +// contain any duplicates. pub struct BoxedLockCollection { data: Box, - locks: Vec<&'static dyn RawLock>, + locks: Vec<&'static dyn RawLock>, // As far as you know, it's static. + // Believe it or not, saying the lifetime + // is static when it's not isn't UB } /// Locks a collection of locks using a retrying algorithm. 
@@ -97,6 +113,8 @@ pub struct BoxedLockCollection { /// [`Lockable`]: `crate::lockable::Lockable` /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` /// [livelocking]: https://en.wikipedia.org/wiki/Deadlock#Livelock + +// This type caches the fact that there are no duplicates #[derive(Debug)] pub struct RetryingLockCollection { data: L, diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs index 224eedb..5ced6d1 100644 --- a/src/collection/boxed.rs +++ b/src/collection/boxed.rs @@ -4,7 +4,7 @@ use std::marker::PhantomData; use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; use crate::Keyable; -use super::{BoxedLockCollection, LockGuard}; +use super::{utils, BoxedLockCollection, LockGuard}; /// returns `true` if the sorted list contains a duplicate #[must_use] @@ -185,6 +185,8 @@ impl BoxedLockCollection { let data = Box::new(data); let mut locks = Vec::new(); data.get_ptrs(&mut locks); + + // cast to *const () because fat pointers can't be converted to usize locks.sort_by_key(|lock| std::ptr::from_ref(*lock).cast::<()>() as usize); // safety: the box will be dropped after the lock references, so it's @@ -310,17 +312,8 @@ impl BoxedLockCollection { key: Key, ) -> Option, Key>> { let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } + if !utils::ordered_try_lock(&self.locks) { + return None; } // safety: we've acquired the locks @@ -424,17 +417,8 @@ impl BoxedLockCollection { key: Key, ) -> Option, Key>> { let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_read(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock_read(); - } - return None; - } + if 
!utils::ordered_try_read(&self.locks) { + return None; } // safety: we've acquired the locks diff --git a/src/collection/owned.rs b/src/collection/owned.rs index e1549b2..919c403 100644 --- a/src/collection/owned.rs +++ b/src/collection/owned.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; use crate::Keyable; -use super::{LockGuard, OwnedLockCollection}; +use super::{utils, LockGuard, OwnedLockCollection}; fn get_locks(data: &L) -> Vec<&dyn RawLock> { let mut locks = Vec::new(); @@ -191,17 +191,8 @@ impl OwnedLockCollection { ) -> Option, Key>> { let locks = get_locks(&self.data); let guard = unsafe { - for (i, lock) in locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } + if !utils::ordered_try_lock(&locks) { + return None; } // safety: we've acquired the locks @@ -315,17 +306,8 @@ impl OwnedLockCollection { ) -> Option, Key>> { let locks = get_locks(&self.data); let guard = unsafe { - for (i, lock) in locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_read(); - - if !success { - for lock in &locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } + if !utils::ordered_try_read(&locks) { + return None; } // safety: we've acquired the locks diff --git a/src/collection/ref.rs b/src/collection/ref.rs index e5c548f..d8c7f2e 100644 --- a/src/collection/ref.rs +++ b/src/collection/ref.rs @@ -4,7 +4,7 @@ use std::marker::PhantomData; use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; use crate::Keyable; -use super::{LockGuard, RefLockCollection}; +use super::{utils, LockGuard, RefLockCollection}; #[must_use] pub fn get_locks(data: &L) -> Vec<&dyn RawLock> { @@ -221,17 +221,8 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> { key: Key, ) -> 
Option, Key>> { let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } + if !utils::ordered_try_lock(&self.locks) { + return None; } // safety: we've acquired the locks @@ -339,17 +330,8 @@ impl<'a, L: Sharable> RefLockCollection<'a, L> { key: Key, ) -> Option, Key>> { let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_read(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock_read(); - } - return None; - } + if !utils::ordered_try_read(&self.locks) { + return None; } // safety: we've acquired the locks diff --git a/src/collection/utils.rs b/src/collection/utils.rs new file mode 100644 index 0000000..dc58399 --- /dev/null +++ b/src/collection/utils.rs @@ -0,0 +1,44 @@ +use crate::lockable::RawLock; + +/// Locks the locks in the order they are given. This causes deadlock if the +/// locks contain duplicates, or if this is called by multiple threads with the +/// locks in different orders. +pub unsafe fn ordered_try_lock(locks: &[&dyn RawLock]) -> bool { + unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_lock(); + + if !success { + for lock in &locks[0..i] { + // safety: this lock was already acquired + lock.unlock(); + } + return false; + } + } + + true + } +} + +/// Locks the locks in the order they are given. This causes deadlock if this is +called by multiple threads with the locks in different orders. 
+pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool { + unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_read(); + + if !success { + for lock in &locks[0..i] { + // safety: this lock was already acquired + lock.unlock_read(); + } + return false; + } + } + + true + } +} diff --git a/src/key.rs b/src/key.rs index 875f4be..4d6504f 100644 --- a/src/key.rs +++ b/src/key.rs @@ -7,6 +7,8 @@ use thread_local::ThreadLocal; use sealed::Sealed; +// Sealed to prevent other key types from being implemented. Otherwise, this +// would be almost instant undefined behavior. mod sealed { use super::ThreadKey; @@ -15,6 +17,9 @@ mod sealed { impl Sealed for &mut ThreadKey {} } +// I am concerned that having multiple crates linked together with different +// static variables could break my key system. Library code probably shouldn't +// be creating keys at all. static KEY: Lazy> = Lazy::new(ThreadLocal::new); /// The key for the current thread. @@ -34,6 +39,7 @@ pub struct ThreadKey { /// values invalid. pub unsafe trait Keyable: Sealed {} unsafe impl Keyable for ThreadKey {} +// the ThreadKey can't be moved while a mutable reference to it exists unsafe impl Keyable for &mut ThreadKey {} impl Debug for ThreadKey { @@ -42,6 +48,7 @@ impl Debug for ThreadKey { } } +// If you lose the thread key, you can get it back by calling ThreadKey::get impl Drop for ThreadKey { fn drop(&mut self) { unsafe { KEY.get().unwrap().force_unlock() } diff --git a/src/lib.rs b/src/lib.rs index 9c39c6d..643c3e7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -176,6 +176,9 @@ pub use key::{Keyable, ThreadKey}; #[cfg(feature = "spin")] pub use mutex::SpinLock; +// Personally, I think re-exports look ugly in the rust documentation, so I +// went with type aliases instead. + /// A collection of locks that can be acquired simultaneously. /// /// This re-exports [`BoxedLockCollection`] as a sensible default. 
diff --git a/src/lockable.rs b/src/lockable.rs index 6b9c7c6..9f44981 100644 --- a/src/lockable.rs +++ b/src/lockable.rs @@ -14,6 +14,13 @@ use lock_api::{RawMutex, RawRwLock}; /// A deadlock must never occur. The `unlock` method must correctly unlock the /// data. The `get_ptrs` method must be implemented correctly. The `Output` /// must be unlocked when it is dropped. + +// Why not use a RawRwLock? Because that would be semantically incorrect, and I +// don't want an INIT or GuardMarker associated item. +// Originally, RawLock had a sister trait: RawSharableLock. I removed it +// because it'd be difficult to implement a separate type that takes a +// different kind of RawLock. But now the Sharable marker trait is needed to +// indicate if reads can be used. pub unsafe trait RawLock: Send + Sync { /// Blocks until the lock is acquired /// @@ -175,6 +182,8 @@ unsafe impl RawLock for Mutex { self.raw().unlock() } + // this is the closest thing to a read we can get, but Sharable isn't + // implemented for this unsafe fn read(&self) { self.raw().lock() } @@ -291,8 +300,13 @@ unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for WriteLock<'l, } } +// Technically, the exclusive locks can also be shared, but there's currently +// no way to express that. I don't think I want to ever express that. unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Sharable for ReadLock<'l, T, R> {} +// Because both ReadLock and WriteLock hold references to RwLocks, they can't +// implement OwnedLockable + unsafe impl Lockable for &T { type Guard<'g> = T::Guard<'g> where Self: 'g; @@ -335,6 +349,8 @@ unsafe impl Sharable for &mut T {} unsafe impl OwnedLockable for &mut T {} +/// Implements `Lockable`, `Sharable`, and `OwnedLockable` for tuples +/// ex: `tuple_impls!(A B C, 0 1 2);` macro_rules! tuple_impls { ($($generic:ident)*, $($value:tt)*) => { unsafe impl<$($generic: Lockable,)*> Lockable for ($($generic,)*) { @@ -347,6 +363,8 @@ macro_rules! 
tuple_impls { } unsafe fn guard(&self) -> Self::Guard<'_> { + // It's weird that this works + // I don't think any other way of doing it compiles ($(self.$value.guard(),)*) } @@ -381,6 +399,8 @@ unsafe impl Lockable for [T; N] { } unsafe fn guard<'g>(&'g self) -> Self::Guard<'g> { + // The MaybeInit helper functions for arrays aren't stable yet, so + // we'll just have to implement it ourselves let mut guards = MaybeUninit::<[MaybeUninit>; N]>::uninit().assume_init(); for i in 0..N { guards[i].write(self[i].guard()); @@ -430,7 +450,8 @@ unsafe impl Lockable for Box<[T]> { } unsafe impl Lockable for Vec { - type Guard<'g> = Vec> where Self: 'g; + // There's no reason why I'd ever want to extend a list of lock guards + type Guard<'g> = Box<[T::Guard<'g>]> where Self: 'g; type ReadGuard<'g> = Box<[T::ReadGuard<'g>]> where Self: 'g; @@ -446,7 +467,7 @@ unsafe impl Lockable for Vec { guards.push(lock.guard()); } - guards + guards.into_boxed_slice() } unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { @@ -459,6 +480,9 @@ unsafe impl Lockable for Vec { } } +// I'd make a generic impl> Lockable for I +// but I think that'd require sealing up this trait + unsafe impl Sharable for [T; N] {} unsafe impl Sharable for Box<[T]> {} unsafe impl Sharable for Vec {} diff --git a/src/mutex.rs b/src/mutex.rs index a3baa00..b30c2b1 100644 --- a/src/mutex.rs +++ b/src/mutex.rs @@ -49,8 +49,11 @@ pub struct MutexRef<'a, T: ?Sized + 'a, R: RawMutex>( /// /// [`lock`]: `Mutex::lock` /// [`try_lock`]: `Mutex::try_lock` + +// This is the most lifetime-intensive thing I've ever written. Can I graduate +// from borrow checker university now? 
pub struct MutexGuard<'a, 'key: 'a, T: ?Sized + 'a, Key: Keyable + 'key, R: RawMutex> { - mutex: MutexRef<'a, T, R>, + mutex: MutexRef<'a, T, R>, // this way we don't need to re-implement Drop thread_key: Key, _phantom: PhantomData<&'key ()>, } diff --git a/src/mutex/guard.rs b/src/mutex/guard.rs index 9e8e2e6..f9324ad 100644 --- a/src/mutex/guard.rs +++ b/src/mutex/guard.rs @@ -8,6 +8,9 @@ use crate::key::Keyable; use super::{Mutex, MutexGuard, MutexRef}; +// This makes things slightly easier because now you can use +// `println!("{guard}")` instead of `println!("{}", *guard)`. I wonder if I +// should implement some other standard library traits like this too? impl<'a, T: Debug + ?Sized + 'a, R: RawMutex> Debug for MutexRef<'a, T, R> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Debug::fmt(&**self, f) @@ -63,11 +66,18 @@ impl<'a, T: ?Sized + 'a, R: RawMutex> AsMut for MutexRef<'a, T, R> { impl<'a, T: ?Sized + 'a, R: RawMutex> MutexRef<'a, T, R> { /// Creates a reference to the underlying data of a mutex without /// attempting to lock it or take ownership of the key. + + // This might be useful to export, because it makes it easier to express + // the concept of: "Get the data out of the mutex but don't lock it or take + // the key". But it's also quite dangerous to drop.
pub(crate) unsafe fn new(mutex: &'a Mutex) -> Self { Self(mutex, PhantomData) } } +// it's kinda annoying to re-implement some of this stuff on guards +// there's nothing i can do about that + impl<'a, 'key, T: Debug + ?Sized + 'a, Key: Keyable + 'key, R: RawMutex> Debug for MutexGuard<'a, 'key, T, Key, R> { diff --git a/src/mutex/mutex.rs b/src/mutex/mutex.rs index 52b6081..89dfef9 100644 --- a/src/mutex/mutex.rs +++ b/src/mutex/mutex.rs @@ -48,6 +48,7 @@ impl Debug for Mutex { // safety: this is just a try lock, and the value is dropped // immediately after, so there's no risk of blocking ourselves // or any other threads + // when i implement try_clone this code will become less unsafe if let Some(value) = unsafe { self.try_lock_no_key() } { f.debug_struct("Mutex").field("data", &&*value).finish() } else { @@ -77,6 +78,9 @@ impl From for Mutex { } } +// We don't need a `get_mut` because we don't have mutex poisoning. Hurray! +// This is safe because you can't have a mutable reference to the lock if it's +// locked. Being locked requires an immutable reference because of the guard. impl AsMut for Mutex { fn as_mut(&mut self) -> &mut T { self.get_mut() diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs index 8501cd8..2cf73cd 100644 --- a/src/rwlock/write_lock.rs +++ b/src/rwlock/write_lock.rs @@ -11,7 +11,9 @@ impl<'l, T: ?Sized + Debug, R: RawRwLock> Debug for WriteLock<'l, T, R> { // safety: this is just a try lock, and the value is dropped // immediately after, so there's no risk of blocking ourselves // or any other threads - if let Some(value) = unsafe { self.try_lock_no_key() } { + // It makes zero sense to try using an exclusive lock for this, so this + // is the only time when WriteLock does a read. 
+ if let Some(value) = unsafe { self.0.try_read_no_key() } { f.debug_struct("WriteLock").field("data", &&*value).finish() } else { struct LockedPlaceholder; @@ -75,11 +77,8 @@ impl<'l, T: ?Sized, R: RawRwLock> WriteLock<'l, T, R> { self.0.try_write(key) } - /// Attempts to create an exclusive lock without a key. Locking this - /// without exclusive access to the key is undefined behavior. - pub(crate) unsafe fn try_lock_no_key(&self) -> Option> { - self.0.try_write_no_key() - } + // There's no `try_lock_no_key`. Instead, `try_read_no_key` is called on + // the referenced `RwLock`. /// Immediately drops the guard, and consequently releases the exclusive /// lock. -- cgit v1.2.3 From 8ecf29cfe2a74d02b2c4bcb7f7ad1a811dc38dfe Mon Sep 17 00:00:00 2001 From: Botahamec Date: Thu, 23 May 2024 19:55:41 -0400 Subject: Unused imports --- src/rwlock/rwlock.rs | 2 +- src/rwlock/write_lock.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'src/rwlock/write_lock.rs') diff --git a/src/rwlock/rwlock.rs b/src/rwlock/rwlock.rs index 5b6065a..5bff5a3 100644 --- a/src/rwlock/rwlock.rs +++ b/src/rwlock/rwlock.rs @@ -5,7 +5,7 @@ use lock_api::RawRwLock; use crate::key::Keyable; -use super::{RwLock, RwLockReadGuard, RwLockReadRef, RwLockWriteGuard, RwLockWriteRef}; +use super::{RwLock, RwLockReadGuard, RwLockReadRef, RwLockWriteGuard}; impl RwLock { /// Creates a new instance of an `RwLock` which is unlocked. diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs index 2cf73cd..15eaacc 100644 --- a/src/rwlock/write_lock.rs +++ b/src/rwlock/write_lock.rs @@ -4,7 +4,7 @@ use lock_api::RawRwLock; use crate::key::Keyable; -use super::{RwLock, RwLockWriteGuard, RwLockWriteRef, WriteLock}; +use super::{RwLock, RwLockWriteGuard, WriteLock}; impl<'l, T: ?Sized + Debug, R: RawRwLock> Debug for WriteLock<'l, T, R> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -- cgit v1.2.3