summaryrefslogtreecommitdiff
path: root/src/collection
diff options
context:
space:
mode:
author Mica White <botahamec@outlook.com> 2026-03-14 21:33:50 -0400
committer Mica White <botahamec@outlook.com> 2026-03-14 21:33:50 -0400
commit 482b47f4ed786946acb324b60d9f7ae7dd8cc075 (patch)
tree 4216f042597679f2b76adbb61d3ba9596e96a060 /src/collection
parent d9095d8fce59714f75019ecf68911d9931a1af15 (diff)
Apply clippy restrictions (HEAD, main)
Diffstat (limited to 'src/collection')
-rwxr-xr-x src/collection/boxed.rs  6
-rwxr-xr-x src/collection/owned.rs  4
-rwxr-xr-x src/collection/ref.rs    4
-rwxr-xr-x src/collection/retry.rs  53
4 files changed, 32 insertions, 35 deletions
diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs
index 83675b5..1478120 100755
--- a/src/collection/boxed.rs
+++ b/src/collection/boxed.rs
@@ -141,7 +141,7 @@ impl<L: OwnedLockable, I: FromIterator<L> + OwnedLockable> FromIterator<L>
}
// safety: the RawLocks must be send because they come from the Send Lockable
-#[allow(clippy::non_send_fields_in_send_ty)]
+#[expect(clippy::non_send_fields_in_send_ty)]
unsafe impl<L: Send> Send for BoxedLockCollection<L> {}
unsafe impl<L: Sync> Sync for BoxedLockCollection<L> {}
@@ -327,7 +327,7 @@ impl<L: Lockable> BoxedLockCollection<L> {
locks.sort_by_key(|lock| (&raw const **lock).cast::<()>() as usize);
// safety: we're just changing the lifetimes
- let locks: Vec<&'static dyn RawLock> = std::mem::transmute(locks);
+ let locks: Vec<&'static dyn RawLock> = unsafe { std::mem::transmute(locks) };
let data = &raw const *data;
Self { child: data, locks }
}
@@ -821,7 +821,7 @@ mod tests {
}
#[test]
- #[allow(clippy::float_cmp)]
+ #[expect(clippy::float_cmp)]
fn uses_correct_default() {
let collection =
BoxedLockCollection::<(Mutex<f64>, Mutex<Option<i32>>, Mutex<usize>)>::default();
diff --git a/src/collection/owned.rs b/src/collection/owned.rs
index 516f1ea..a7ab1b6 100755
--- a/src/collection/owned.rs
+++ b/src/collection/owned.rs
@@ -368,7 +368,6 @@ impl<L: OwnedLockable> OwnedLockCollection<L> {
/// *guard.1 = "1";
/// let key = OwnedLockCollection::<(Mutex<i32>, Mutex<&str>)>::unlock(guard);
/// ```
- #[allow(clippy::missing_const_for_fn)]
pub fn unlock(guard: LockGuard<L::Guard<'_>>) -> ThreadKey {
drop(guard.guard);
guard.key
@@ -553,7 +552,6 @@ impl<L: Sharable> OwnedLockCollection<L> {
/// let mut guard = lock.read(key);
/// let key = OwnedLockCollection::<(RwLock<i32>, RwLock<&str>)>::unlock_read(guard);
/// ```
- #[allow(clippy::missing_const_for_fn)]
pub fn unlock_read(guard: LockGuard<L::ReadGuard<'_>>) -> ThreadKey {
drop(guard.guard);
guard.key
@@ -790,7 +788,6 @@ mod tests {
std::thread::scope(|s| {
s.spawn(|| {
let key = ThreadKey::get().unwrap();
- #[allow(unused)]
let guard = collection.lock(key);
std::mem::forget(guard);
});
@@ -817,7 +814,6 @@ mod tests {
std::thread::scope(|s| {
s.spawn(|| {
let key = ThreadKey::get().unwrap();
- #[allow(unused)]
let guard = collection.lock(key);
std::mem::forget(guard);
});
diff --git a/src/collection/ref.rs b/src/collection/ref.rs
index a995097..a422bb2 100755
--- a/src/collection/ref.rs
+++ b/src/collection/ref.rs
@@ -123,7 +123,7 @@ impl<L: Debug> Debug for RefLockCollection<'_, L> {
}
// safety: the RawLocks must be send because they come from the Send Lockable
-#[allow(clippy::non_send_fields_in_send_ty)]
+#[expect(clippy::non_send_fields_in_send_ty)]
unsafe impl<L: Send> Send for RefLockCollection<'_, L> {}
unsafe impl<L: Sync> Sync for RefLockCollection<'_, L> {}
@@ -418,7 +418,6 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> {
/// *guard.1 = "1";
/// let key = RefLockCollection::<(Mutex<i32>, Mutex<&str>)>::unlock(guard);
/// ```
- #[allow(clippy::missing_const_for_fn)]
pub fn unlock(guard: LockGuard<L::Guard<'_>>) -> ThreadKey {
drop(guard.guard);
guard.key
@@ -604,7 +603,6 @@ impl<L: Sharable> RefLockCollection<'_, L> {
/// let mut guard = lock.read(key);
/// let key = RefLockCollection::<(RwLock<i32>, RwLock<&str>)>::unlock_read(guard);
/// ```
- #[allow(clippy::missing_const_for_fn)]
pub fn unlock_read(guard: LockGuard<L::ReadGuard<'_>>) -> ThreadKey {
drop(guard.guard);
guard.key
diff --git a/src/collection/retry.rs b/src/collection/retry.rs
index 4a5df6e..b9ac530 100755
--- a/src/collection/retry.rs
+++ b/src/collection/retry.rs
@@ -154,36 +154,39 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
handle_unwind(
|| 'outer: loop {
// safety: we have the thread key
- locks[first_index.get()].raw_read();
- for (i, lock) in locks.iter().enumerate() {
- if i == first_index.get() {
- continue;
- }
-
- // safety: we have the thread key
- if lock.raw_try_read() {
- locked.set(locked.get() + 1);
- } else {
- // safety: we already locked all of these
- attempt_to_recover_reads_from_panic(&locks[0..i]);
+ unsafe {
+ locks[first_index.get()].raw_read();
- if first_index.get() >= i {
- // safety: this is already locked and can't be unlocked
- // by the previous loop
- locks[first_index.get()].raw_unlock_read();
+ for (i, lock) in locks.iter().enumerate() {
+ if i == first_index.get() {
+ continue;
}
- // these are no longer locked
- locked.set(0);
+ // safety: we have the thread key
+ if lock.raw_try_read() {
+ locked.set(locked.get() + 1);
+ } else {
+ // safety: we already locked all of these
+ attempt_to_recover_reads_from_panic(&locks[0..i]);
+
+ if first_index.get() >= i {
+ // safety: this is already locked and can't be unlocked
+ // by the previous loop
+ locks[first_index.get()].raw_unlock_read();
+ }
+
+ // these are no longer locked
+ locked.set(0);
- // don't go into a spin loop, wait for this one to lock
- first_index.set(i);
- continue 'outer;
+ // don't go into a spin loop, wait for this one to lock
+ first_index.set(i);
+ continue 'outer;
+ }
}
- }
- // safety: we locked all the data
- break;
+ // safety: we locked all the data
+ break;
+ }
},
|| {
utils::attempt_to_recover_reads_from_panic(&locks[0..locked.get()]);
@@ -1019,7 +1022,7 @@ mod tests {
}
#[test]
- #[allow(clippy::float_cmp)]
+ #[expect(clippy::float_cmp)]
fn uses_correct_default() {
let collection =
RetryingLockCollection::<(RwLock<f64>, Mutex<Option<i32>>, Mutex<usize>)>::default();