summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorBotahamec <botahamec@outlook.com>2024-05-23 19:50:32 -0400
committerBotahamec <botahamec@outlook.com>2024-05-23 19:50:32 -0400
commitf81d4b40a007fecf6502a36b4c24a1e31807a731 (patch)
treeb4cc65f0ccbc118e47ede4e6556fa1123aae41c8 /src
parentfa39064fe2f3399d27762a23c54d4703d00bd199 (diff)
Comments
Diffstat (limited to 'src')
-rw-r--r--src/collection.rs20
-rw-r--r--src/collection/boxed.rs30
-rw-r--r--src/collection/owned.rs28
-rw-r--r--src/collection/ref.rs28
-rw-r--r--src/collection/utils.rs44
-rw-r--r--src/key.rs7
-rw-r--r--src/lib.rs3
-rw-r--r--src/lockable.rs28
-rw-r--r--src/mutex.rs5
-rw-r--r--src/mutex/guard.rs10
-rw-r--r--src/mutex/mutex.rs4
-rw-r--r--src/rwlock/write_lock.rs11
12 files changed, 139 insertions, 79 deletions
diff --git a/src/collection.rs b/src/collection.rs
index 5dc6946..27ec1c4 100644
--- a/src/collection.rs
+++ b/src/collection.rs
@@ -7,6 +7,7 @@ mod guard;
mod owned;
mod r#ref;
mod retry;
+mod utils;
/// Locks a collection of locks, which cannot be shared immutably.
///
@@ -24,6 +25,9 @@ mod retry;
///
/// [`Lockable`]: `crate::lockable::Lockable`
/// [`OwnedLockable`]: `crate::lockable::OwnedLockable`
+
+// this type encodes the invariant that no immutable references to the
+// underlying collection exist
#[derive(Debug)]
pub struct OwnedLockCollection<L> {
data: L,
@@ -47,6 +51,13 @@ pub struct OwnedLockCollection<L> {
///
/// [`Lockable`]: `crate::lockable::Lockable`
/// [`OwnedLockable`]: `crate::lockable::OwnedLockable`
+
+// This type was born when I eventually realized that I needed a
+// self-referential structure. That used boxing, so I elected to make a more
+// efficient implementation (polonius please save us)
+
+// This type caches the sorting order of the locks and the fact that it doesn't
+// contain any duplicates.
pub struct RefLockCollection<'a, L> {
data: &'a L,
locks: Vec<&'a dyn RawLock>,
@@ -70,9 +81,14 @@ pub struct RefLockCollection<'a, L> {
///
/// [`Lockable`]: `crate::lockable::Lockable`
/// [`OwnedLockable`]: `crate::lockable::OwnedLockable`
+
+// This type caches the sorting order of the locks and the fact that it doesn't
+// contain any duplicates.
pub struct BoxedLockCollection<L> {
data: Box<L>,
- locks: Vec<&'static dyn RawLock>,
+ locks: Vec<&'static dyn RawLock>, // As far as you know, it's static.
+ // Believe it or not, saying the lifetime
+ // is static when it's not isn't UB
}
/// Locks a collection of locks using a retrying algorithm.
@@ -97,6 +113,8 @@ pub struct BoxedLockCollection<L> {
/// [`Lockable`]: `crate::lockable::Lockable`
/// [`OwnedLockable`]: `crate::lockable::OwnedLockable`
/// [livelocking]: https://en.wikipedia.org/wiki/Deadlock#Livelock
+
+// This type caches the fact that there are no duplicates
#[derive(Debug)]
pub struct RetryingLockCollection<L> {
data: L,
diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs
index 224eedb..5ced6d1 100644
--- a/src/collection/boxed.rs
+++ b/src/collection/boxed.rs
@@ -4,7 +4,7 @@ use std::marker::PhantomData;
use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable};
use crate::Keyable;
-use super::{BoxedLockCollection, LockGuard};
+use super::{utils, BoxedLockCollection, LockGuard};
/// returns `true` if the sorted list contains a duplicate
#[must_use]
@@ -185,6 +185,8 @@ impl<L: Lockable> BoxedLockCollection<L> {
let data = Box::new(data);
let mut locks = Vec::new();
data.get_ptrs(&mut locks);
+
+ // cast to *const () because fat pointers can't be converted to usize
locks.sort_by_key(|lock| std::ptr::from_ref(*lock).cast::<()>() as usize);
// safety: the box will be dropped after the lock references, so it's
@@ -310,17 +312,8 @@ impl<L: Lockable> BoxedLockCollection<L> {
key: Key,
) -> Option<LockGuard<'key, L::Guard<'g>, Key>> {
let guard = unsafe {
- for (i, lock) in self.locks.iter().enumerate() {
- // safety: we have the thread key
- let success = lock.try_lock();
-
- if !success {
- for lock in &self.locks[0..i] {
- // safety: this lock was already acquired
- lock.unlock();
- }
- return None;
- }
+ if !utils::ordered_try_lock(&self.locks) {
+ return None;
}
// safety: we've acquired the locks
@@ -424,17 +417,8 @@ impl<L: Sharable> BoxedLockCollection<L> {
key: Key,
) -> Option<LockGuard<'key, L::ReadGuard<'g>, Key>> {
let guard = unsafe {
- for (i, lock) in self.locks.iter().enumerate() {
- // safety: we have the thread key
- let success = lock.try_read();
-
- if !success {
- for lock in &self.locks[0..i] {
- // safety: this lock was already acquired
- lock.unlock_read();
- }
- return None;
- }
+ if !utils::ordered_try_read(&self.locks) {
+ return None;
}
// safety: we've acquired the locks
diff --git a/src/collection/owned.rs b/src/collection/owned.rs
index e1549b2..919c403 100644
--- a/src/collection/owned.rs
+++ b/src/collection/owned.rs
@@ -3,7 +3,7 @@ use std::marker::PhantomData;
use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable};
use crate::Keyable;
-use super::{LockGuard, OwnedLockCollection};
+use super::{utils, LockGuard, OwnedLockCollection};
fn get_locks<L: Lockable>(data: &L) -> Vec<&dyn RawLock> {
let mut locks = Vec::new();
@@ -191,17 +191,8 @@ impl<L: OwnedLockable> OwnedLockCollection<L> {
) -> Option<LockGuard<'key, L::Guard<'g>, Key>> {
let locks = get_locks(&self.data);
let guard = unsafe {
- for (i, lock) in locks.iter().enumerate() {
- // safety: we have the thread key
- let success = lock.try_lock();
-
- if !success {
- for lock in &locks[0..i] {
- // safety: this lock was already acquired
- lock.unlock();
- }
- return None;
- }
+ if !utils::ordered_try_lock(&locks) {
+ return None;
}
// safety: we've acquired the locks
@@ -315,17 +306,8 @@ impl<L: Sharable> OwnedLockCollection<L> {
) -> Option<LockGuard<'key, L::ReadGuard<'g>, Key>> {
let locks = get_locks(&self.data);
let guard = unsafe {
- for (i, lock) in locks.iter().enumerate() {
- // safety: we have the thread key
- let success = lock.try_read();
-
- if !success {
- for lock in &locks[0..i] {
- // safety: this lock was already acquired
- lock.unlock();
- }
- return None;
- }
+ if !utils::ordered_try_read(&locks) {
+ return None;
}
// safety: we've acquired the locks
diff --git a/src/collection/ref.rs b/src/collection/ref.rs
index e5c548f..d8c7f2e 100644
--- a/src/collection/ref.rs
+++ b/src/collection/ref.rs
@@ -4,7 +4,7 @@ use std::marker::PhantomData;
use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable};
use crate::Keyable;
-use super::{LockGuard, RefLockCollection};
+use super::{utils, LockGuard, RefLockCollection};
#[must_use]
pub fn get_locks<L: Lockable>(data: &L) -> Vec<&dyn RawLock> {
@@ -221,17 +221,8 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> {
key: Key,
) -> Option<LockGuard<'key, L::Guard<'a>, Key>> {
let guard = unsafe {
- for (i, lock) in self.locks.iter().enumerate() {
- // safety: we have the thread key
- let success = lock.try_lock();
-
- if !success {
- for lock in &self.locks[0..i] {
- // safety: this lock was already acquired
- lock.unlock();
- }
- return None;
- }
+ if !utils::ordered_try_lock(&self.locks) {
+ return None;
}
// safety: we've acquired the locks
@@ -339,17 +330,8 @@ impl<'a, L: Sharable> RefLockCollection<'a, L> {
key: Key,
) -> Option<LockGuard<'key, L::ReadGuard<'a>, Key>> {
let guard = unsafe {
- for (i, lock) in self.locks.iter().enumerate() {
- // safety: we have the thread key
- let success = lock.try_read();
-
- if !success {
- for lock in &self.locks[0..i] {
- // safety: this lock was already acquired
- lock.unlock_read();
- }
- return None;
- }
+ if !utils::ordered_try_read(&self.locks) {
+ return None;
}
// safety: we've acquired the locks
diff --git a/src/collection/utils.rs b/src/collection/utils.rs
new file mode 100644
index 0000000..dc58399
--- /dev/null
+++ b/src/collection/utils.rs
@@ -0,0 +1,44 @@
+use crate::lockable::RawLock;
+
+/// Locks the locks in the order they are given. This causes deadlock if the
+/// locks contain duplicates, or if this is called by multiple threads with the
+/// locks in different orders.
+pub unsafe fn ordered_try_lock(locks: &[&dyn RawLock]) -> bool {
+ unsafe {
+ for (i, lock) in locks.iter().enumerate() {
+ // safety: we have the thread key
+ let success = lock.try_lock();
+
+ if !success {
+ for lock in &locks[0..i] {
+ // safety: this lock was already acquired
+ lock.unlock();
+ }
+ return false;
+ }
+ }
+
+ true
+ }
+}
+
+/// Acquires read locks on the locks in the order they are given. This causes
+/// deadlock if this is called by multiple threads with the locks in different
+/// orders.
+pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool {
+ unsafe {
+ for (i, lock) in locks.iter().enumerate() {
+ // safety: we have the thread key
+ let success = lock.try_read();
+
+ if !success {
+ for lock in &locks[0..i] {
+ // safety: this lock was already acquired
+ lock.unlock_read();
+ }
+ return false;
+ }
+ }
+
+ true
+ }
+}
diff --git a/src/key.rs b/src/key.rs
index 875f4be..4d6504f 100644
--- a/src/key.rs
+++ b/src/key.rs
@@ -7,6 +7,8 @@ use thread_local::ThreadLocal;
use sealed::Sealed;
+// Sealed to prevent other key types from being implemented. Otherwise, this
+// would almost instantly cause undefined behavior.
mod sealed {
use super::ThreadKey;
@@ -15,6 +17,9 @@ mod sealed {
impl Sealed for &mut ThreadKey {}
}
+// I am concerned that having multiple crates linked together with different
+// static variables could break my key system. Library code probably shouldn't
+// be creating keys at all.
static KEY: Lazy<ThreadLocal<AtomicLock>> = Lazy::new(ThreadLocal::new);
/// The key for the current thread.
@@ -34,6 +39,7 @@ pub struct ThreadKey {
/// values invalid.
pub unsafe trait Keyable: Sealed {}
unsafe impl Keyable for ThreadKey {}
+// the ThreadKey can't be moved while a mutable reference to it exists
unsafe impl Keyable for &mut ThreadKey {}
impl Debug for ThreadKey {
@@ -42,6 +48,7 @@ impl Debug for ThreadKey {
}
}
+// If you lose the thread key, you can get it back by calling ThreadKey::get
impl Drop for ThreadKey {
fn drop(&mut self) {
unsafe { KEY.get().unwrap().force_unlock() }
diff --git a/src/lib.rs b/src/lib.rs
index 9c39c6d..643c3e7 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -176,6 +176,9 @@ pub use key::{Keyable, ThreadKey};
#[cfg(feature = "spin")]
pub use mutex::SpinLock;
+// Personally, I think re-exports look ugly in the rust documentation, so I
+// went with type aliases instead.
+
/// A collection of locks that can be acquired simultaneously.
///
/// This re-exports [`BoxedLockCollection`] as a sensible default.
diff --git a/src/lockable.rs b/src/lockable.rs
index 6b9c7c6..9f44981 100644
--- a/src/lockable.rs
+++ b/src/lockable.rs
@@ -14,6 +14,13 @@ use lock_api::{RawMutex, RawRwLock};
/// A deadlock must never occur. The `unlock` method must correctly unlock the
/// data. The `get_ptrs` method must be implemented correctly. The `Output`
/// must be unlocked when it is dropped.
+
+// Why not use a RawRwLock? Because that would be semantically incorrect, and I
+// don't want an INIT or GuardMarker associated item.
+// Originally, RawLock had a sister trait: RawSharableLock. I removed it
+// because it'd be difficult to implement a separate type that takes a
+// different kind of RawLock. But now the Sharable marker trait is needed to
+// indicate if reads can be used.
pub unsafe trait RawLock: Send + Sync {
/// Blocks until the lock is acquired
///
@@ -175,6 +182,8 @@ unsafe impl<T: Send, R: RawMutex + Send + Sync> RawLock for Mutex<T, R> {
self.raw().unlock()
}
+ // this is the closest thing to a read we can get, but Sharable isn't
+ // implemented for this
unsafe fn read(&self) {
self.raw().lock()
}
@@ -291,8 +300,13 @@ unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for WriteLock<'l,
}
}
+// Technically, the exclusive locks can also be shared, but there's currently
+// no way to express that. I don't think I want to ever express that.
unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Sharable for ReadLock<'l, T, R> {}
+// Because both ReadLock and WriteLock hold references to RwLocks, they can't
+// implement OwnedLockable
+
unsafe impl<T: Lockable> Lockable for &T {
type Guard<'g> = T::Guard<'g> where Self: 'g;
@@ -335,6 +349,8 @@ unsafe impl<T: Sharable> Sharable for &mut T {}
unsafe impl<T: OwnedLockable> OwnedLockable for &mut T {}
+/// Implements `Lockable`, `Sharable`, and `OwnedLockable` for tuples
+/// ex: `tuple_impls!(A B C, 0 1 2);`
macro_rules! tuple_impls {
($($generic:ident)*, $($value:tt)*) => {
unsafe impl<$($generic: Lockable,)*> Lockable for ($($generic,)*) {
@@ -347,6 +363,8 @@ macro_rules! tuple_impls {
}
unsafe fn guard(&self) -> Self::Guard<'_> {
+ // It's weird that this works
+ // I don't think any other way of doing it compiles
($(self.$value.guard(),)*)
}
@@ -381,6 +399,8 @@ unsafe impl<T: Lockable, const N: usize> Lockable for [T; N] {
}
unsafe fn guard<'g>(&'g self) -> Self::Guard<'g> {
+ // The MaybeUninit helper functions for arrays aren't stable yet, so
+ // we'll just have to implement it ourselves
let mut guards = MaybeUninit::<[MaybeUninit<T::Guard<'g>>; N]>::uninit().assume_init();
for i in 0..N {
guards[i].write(self[i].guard());
@@ -430,7 +450,8 @@ unsafe impl<T: Lockable> Lockable for Box<[T]> {
}
unsafe impl<T: Lockable> Lockable for Vec<T> {
- type Guard<'g> = Vec<T::Guard<'g>> where Self: 'g;
+ // There's no reason why I'd ever want to extend a list of lock guards
+ type Guard<'g> = Box<[T::Guard<'g>]> where Self: 'g;
type ReadGuard<'g> = Box<[T::ReadGuard<'g>]> where Self: 'g;
@@ -446,7 +467,7 @@ unsafe impl<T: Lockable> Lockable for Vec<T> {
guards.push(lock.guard());
}
- guards
+ guards.into_boxed_slice()
}
unsafe fn read_guard(&self) -> Self::ReadGuard<'_> {
@@ -459,6 +480,9 @@ unsafe impl<T: Lockable> Lockable for Vec<T> {
}
}
+// I'd make a generic impl<T: Lockable, I: IntoIterator<Item=T>> Lockable for I
+// but I think that'd require sealing up this trait
+
unsafe impl<T: Sharable, const N: usize> Sharable for [T; N] {}
unsafe impl<T: Sharable> Sharable for Box<[T]> {}
unsafe impl<T: Sharable> Sharable for Vec<T> {}
diff --git a/src/mutex.rs b/src/mutex.rs
index a3baa00..b30c2b1 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -49,8 +49,11 @@ pub struct MutexRef<'a, T: ?Sized + 'a, R: RawMutex>(
///
/// [`lock`]: `Mutex::lock`
/// [`try_lock`]: `Mutex::try_lock`
+
+// This is the most lifetime-intensive thing I've ever written. Can I graduate
+// from borrow checker university now?
pub struct MutexGuard<'a, 'key: 'a, T: ?Sized + 'a, Key: Keyable + 'key, R: RawMutex> {
- mutex: MutexRef<'a, T, R>,
+ mutex: MutexRef<'a, T, R>, // this way we don't need to re-implement Drop
thread_key: Key,
_phantom: PhantomData<&'key ()>,
}
diff --git a/src/mutex/guard.rs b/src/mutex/guard.rs
index 9e8e2e6..f9324ad 100644
--- a/src/mutex/guard.rs
+++ b/src/mutex/guard.rs
@@ -8,6 +8,9 @@ use crate::key::Keyable;
use super::{Mutex, MutexGuard, MutexRef};
+// This makes things slightly easier because now you can use
+// `println!("{guard}")` instead of `println!("{}", *guard)`. I wonder if I
+// should implement some other standard library traits like this too?
impl<'a, T: Debug + ?Sized + 'a, R: RawMutex> Debug for MutexRef<'a, T, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
@@ -63,11 +66,18 @@ impl<'a, T: ?Sized + 'a, R: RawMutex> AsMut<T> for MutexRef<'a, T, R> {
impl<'a, T: ?Sized + 'a, R: RawMutex> MutexRef<'a, T, R> {
/// Creates a reference to the underlying data of a mutex without
/// attempting to lock it or take ownership of the key.
+
+ // This might be useful to export, because it makes it easier to express
+ // the concept of: "Get the data out the mutex but don't lock it or take
+ // the key". But it's also quite dangerous to drop.
pub(crate) unsafe fn new(mutex: &'a Mutex<T, R>) -> Self {
Self(mutex, PhantomData)
}
}
+// It's kinda annoying to re-implement some of this stuff on guards.
+// There's nothing I can do about that.
+
impl<'a, 'key, T: Debug + ?Sized + 'a, Key: Keyable + 'key, R: RawMutex> Debug
for MutexGuard<'a, 'key, T, Key, R>
{
diff --git a/src/mutex/mutex.rs b/src/mutex/mutex.rs
index 52b6081..89dfef9 100644
--- a/src/mutex/mutex.rs
+++ b/src/mutex/mutex.rs
@@ -48,6 +48,7 @@ impl<T: ?Sized + Debug, R: RawMutex> Debug for Mutex<T, R> {
// safety: this is just a try lock, and the value is dropped
// immediately after, so there's no risk of blocking ourselves
// or any other threads
+ // when I implement try_clone this code will become less unsafe
if let Some(value) = unsafe { self.try_lock_no_key() } {
f.debug_struct("Mutex").field("data", &&*value).finish()
} else {
@@ -77,6 +78,9 @@ impl<T, R: RawMutex> From<T> for Mutex<T, R> {
}
}
+// We don't need a `get_mut` because we don't have mutex poisoning. Hurray!
+// This is safe because you can't have a mutable reference to the lock if it's
+// locked. Being locked requires an immutable reference because of the guard.
impl<T: ?Sized, R> AsMut<T> for Mutex<T, R> {
fn as_mut(&mut self) -> &mut T {
self.get_mut()
diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs
index 8501cd8..2cf73cd 100644
--- a/src/rwlock/write_lock.rs
+++ b/src/rwlock/write_lock.rs
@@ -11,7 +11,9 @@ impl<'l, T: ?Sized + Debug, R: RawRwLock> Debug for WriteLock<'l, T, R> {
// safety: this is just a try lock, and the value is dropped
// immediately after, so there's no risk of blocking ourselves
// or any other threads
- if let Some(value) = unsafe { self.try_lock_no_key() } {
+ // It makes zero sense to try using an exclusive lock for this, so this
+ // is the only time when WriteLock does a read.
+ if let Some(value) = unsafe { self.0.try_read_no_key() } {
f.debug_struct("WriteLock").field("data", &&*value).finish()
} else {
struct LockedPlaceholder;
@@ -75,11 +77,8 @@ impl<'l, T: ?Sized, R: RawRwLock> WriteLock<'l, T, R> {
self.0.try_write(key)
}
- /// Attempts to create an exclusive lock without a key. Locking this
- /// without exclusive access to the key is undefined behavior.
- pub(crate) unsafe fn try_lock_no_key(&self) -> Option<RwLockWriteRef<'_, T, R>> {
- self.0.try_write_no_key()
- }
+ // There's no `try_lock_no_key`. Instead, `try_read_no_key` is called on
+ // the referenced `RwLock`.
/// Immediately drops the guard, and consequently releases the exclusive
/// lock.