summaryrefslogtreecommitdiff
path: root/src/mutex.rs
diff options
context:
space:
mode:
authorMica White <botahamec@gmail.com>2024-12-01 15:28:44 -0500
committerMica White <botahamec@gmail.com>2024-12-01 15:29:19 -0500
commit48aaedad542b9c6cbdc85d22517cd0d151f38443 (patch)
treeb5b197c47476e88b9926852c73a84f24b6497c77 /src/mutex.rs
parent0140f58043a2a00312d31907253cc718985e1e6c (diff)
Unit testing
Diffstat (limited to 'src/mutex.rs')
-rw-r--r--src/mutex.rs156
1 files changed, 156 insertions, 0 deletions
diff --git a/src/mutex.rs b/src/mutex.rs
index 433ab47..ae5efc8 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -28,6 +28,101 @@ pub type ParkingMutex<T> = Mutex<T, parking_lot::RawMutex>;
/// Locking the mutex on a thread that already locked it is impossible, due to
/// the requirement of the [`ThreadKey`]. Therefore, this will never deadlock.
///
+/// # Examples
+///
+/// ```
+/// use std::sync::Arc;
+/// use std::thread;
+/// use std::sync::mpsc;
+///
+/// use happylock::{Mutex, ThreadKey};
+///
+/// // Spawn a few threads to increment a shared variable (non-atomically),
+/// // and let the main thread know once all increments are done.
+/// //
+/// // Here we're using an Arc to share memory among threads, and the data
+/// // inside the Arc is protected with a mutex.
+/// const N: usize = 10;
+///
+/// let data = Arc::new(Mutex::new(0));
+///
+/// let (tx, rx) = mpsc::channel();
+/// for _ in 0..N {
+/// let (data, tx) = (Arc::clone(&data), tx.clone());
+/// thread::spawn(move || {
+/// let key = ThreadKey::get().unwrap();
+/// let mut data = data.lock(key);
+/// *data += 1;
+/// if *data == N {
+/// tx.send(()).unwrap();
+/// }
+/// // the lock is unlocked
+/// });
+/// }
+///
+/// rx.recv().unwrap();
+/// ```
+///
+/// To unlock a mutex guard sooner than the end of the enclosing scope, either
+/// create an inner scope, drop the guard manually, or call [`Mutex::unlock`].
+///
+/// ```
+/// use std::sync::Arc;
+/// use std::thread;
+///
+/// use happylock::{Mutex, ThreadKey};
+///
+/// const N: usize = 3;
+///
+/// let data_mutex = Arc::new(Mutex::new(vec![1, 2, 3, 4]));
+/// let res_mutex = Arc::new(Mutex::new(0));
+///
+/// let mut threads = Vec::with_capacity(N);
+/// (0..N).for_each(|_| {
+/// let data_mutex_clone = Arc::clone(&data_mutex);
+/// let res_mutex_clone = Arc::clone(&res_mutex);
+///
+/// threads.push(thread::spawn(move || {
+/// let mut key = ThreadKey::get().unwrap();
+///
+/// // Here we use a block to limit the lifetime of the lock guard.
+/// let result = {
+/// let mut data = data_mutex_clone.lock(&mut key);
+/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
+/// data.push(result);
+/// result
+/// // The mutex guard gets dropped here, so the lock is released
+/// };
+/// // The thread key is available again
+/// *res_mutex_clone.lock(key) += result;
+/// }));
+/// });
+///
+/// let mut key = ThreadKey::get().unwrap();
+/// let mut data = data_mutex.lock(&mut key);
+/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
+/// data.push(result);
+///
+/// // We drop the `data` explicitly because it's not necessary anymore. This
+/// // allows other threads to start working on the data immediately. Dropping
+/// // the data also gives us access to the thread key, so we can lock
+/// // another mutex.
+/// drop(data);
+///
+/// // Here the mutex guard is not assigned to a variable and so, even if the
+/// // scope does not end after this line, the mutex is still released: there is
+/// // no deadlock.
+/// *res_mutex.lock(&mut key) += result;
+///
+/// threads.into_iter().for_each(|thread| {
+/// thread
+/// .join()
/// .expect("The thread creation or execution failed!")
+/// });
+///
+/// assert_eq!(*res_mutex.lock(key), 800);
+/// ```
+///
/// [`lock`]: `Mutex::lock`
/// [`try_lock`]: `Mutex::try_lock`
/// [`ThreadKey`]: `crate::ThreadKey`
@@ -61,3 +156,64 @@ pub struct MutexGuard<'a, 'key: 'a, T: ?Sized + 'a, Key: Keyable + 'key, R: RawM
thread_key: Key,
_phantom: PhantomData<&'key ()>,
}
+
#[cfg(test)]
mod tests {
	use crate::ThreadKey;

	use super::*;

	/// A freshly constructed mutex must report itself as unlocked.
	#[test]
	fn unlocked_when_initialized() {
		let lock: crate::Mutex<_> = Mutex::new("Hello, world!");

		assert!(!lock.is_locked());
	}

	/// While a guard obtained via `lock` is alive, `is_locked` is `true`.
	// NOTE(review): renamed from `locked_after_read` — the test acquires the
	// lock, it does not read through it, so the old name was misleading.
	#[test]
	fn locked_after_lock() {
		let key = ThreadKey::get().unwrap();
		let lock: crate::Mutex<_> = Mutex::new("Hello, world!");

		let guard = lock.lock(key);

		assert!(lock.is_locked());
		drop(guard);
	}

	/// The guard's `Display` impl forwards to the protected value.
	#[test]
	fn display_works_for_guard() {
		let key = ThreadKey::get().unwrap();
		let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");
		let guard = mutex.lock(key);
		assert_eq!(guard.to_string(), "Hello, world!".to_string());
	}

	/// The keyless guard's `Display` impl also forwards to the value.
	#[test]
	fn display_works_for_ref() {
		let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");
		// SAFETY: the mutex is not locked by this thread, so acquiring a
		// keyless guard here cannot deadlock.
		let guard = unsafe { mutex.try_lock_no_key().unwrap() }; // TODO lock_no_key
		assert_eq!(guard.to_string(), "Hello, world!".to_string());
	}

	/// Dropping the keyed guard must release the underlying raw mutex.
	#[test]
	fn dropping_guard_releases_mutex() {
		let mut key = ThreadKey::get().unwrap();
		let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");

		let guard = mutex.lock(&mut key);
		drop(guard);

		assert!(!mutex.is_locked());
	}

	/// Dropping the keyless guard must also release the underlying raw mutex.
	#[test]
	fn dropping_ref_releases_mutex() {
		let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");

		// SAFETY: the mutex is not locked by this thread, so acquiring a
		// keyless guard here cannot deadlock.
		let guard = unsafe { mutex.try_lock_no_key().unwrap() };
		drop(guard);

		assert!(!mutex.is_locked());
	}
}