summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMica White <botahamec@outlook.com>2025-12-08 20:14:03 -0500
committerMica White <botahamec@outlook.com>2025-12-08 20:14:03 -0500
commitc31f4ce84c3c8b3f89a05890df775d4e766aaadb (patch)
tree40169c1240717002197c85985f9bb652dd4b0af8
First commitHEADmain
-rwxr-xr-x.gitignore1
-rwxr-xr-x.vscode/settings.json4
-rwxr-xr-xCargo.lock230
-rwxr-xr-xCargo.toml19
-rwxr-xr-xLICENSE23
-rwxr-xr-xexamples/happylock.rs77
-rwxr-xr-xrustfmt.toml3
-rwxr-xr-xsrc/futex.rs535
-rwxr-xr-xsrc/futex/linux.rs61
-rwxr-xr-xsrc/futex/windows.rs114
-rwxr-xr-xsrc/lazy_box.rs97
-rwxr-xr-xsrc/lib.rs187
-rwxr-xr-xsrc/no_threads.rs122
-rwxr-xr-xsrc/pthread.rs118
-rwxr-xr-xsrc/spin.rs68
-rwxr-xr-xsrc/windows7.rs119
16 files changed, 1778 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100755
index 0000000..ea8c4bf
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100755
index 0000000..cf06105
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,4 @@
+{
+ "rust-analyzer.cargo.features": [],
+ "rust-analyzer.check.features": []
+} \ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100755
index 0000000..a9fab9b
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,230 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "autocfg"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "happylock"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1949ae82dd58a6d8a76ec4761e3b1e01ffd9eb3871cb2d8eba6eda8e66a7b3d6"
+dependencies = [
+ "lock_api",
+ "once_cell",
+ "parking_lot",
+ "thread_local",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.169"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
+
+[[package]]
+name = "lock_api"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "no-panic"
+version = "0.1.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f7da86466fe446079286ef4b2f6d789755b610a9d85da8477633f734d2697e8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
+
+[[package]]
+name = "parking_lot"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-targets",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
+
+[[package]]
+name = "smallvec"
+version = "1.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
+
+[[package]]
+name = "syn"
+version = "2.0.91"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d53cbcb5a243bd33b7858b1d7f4aca2153490815872d86d955d6ea29f743c035"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "sys_locks"
+version = "0.1.0"
+dependencies = [
+ "cfg-if",
+ "happylock",
+ "libc",
+ "lock_api",
+ "no-panic",
+]
+
+[[package]]
+name = "thread_local"
+version = "1.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100755
index 0000000..eb198b4
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "sys_locks"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+cfg-if = "1"
+libc = "^0.2.155"
+lock_api = { version = "0.4", optional = true }
+no-panic = "0.1"
+
+[dev-dependencies]
+happylock = "0.3"
+
+[features]
+default = ["lock_api"]
+unsafe_lock = []
diff --git a/LICENSE b/LICENSE
new file mode 100755
index 0000000..468cd79
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/examples/happylock.rs b/examples/happylock.rs
new file mode 100755
index 0000000..4ebc2f3
--- /dev/null
+++ b/examples/happylock.rs
@@ -0,0 +1,77 @@
+use std::{thread, time::Duration};
+
+use happylock::{collection, ThreadKey};
+
// A happylock mutex backed by this crate's raw `sys_locks::Mutex`.
type Mutex<T> = happylock::mutex::Mutex<T, sys_locks::Mutex>;

// The five diners. Each philosopher's `left`/`right` index a fork in `FORKS`;
// adjacent philosophers share one fork, which is what creates contention.
static PHILOSOPHERS: [Philosopher; 5] = [
	Philosopher {
		name: "Socrates",
		left: 0,
		right: 1,
	},
	Philosopher {
		name: "John Rawls",
		left: 1,
		right: 2,
	},
	Philosopher {
		name: "Jeremy Bentham",
		left: 2,
		right: 3,
	},
	Philosopher {
		name: "John Stuart Mill",
		left: 3,
		right: 4,
	},
	Philosopher {
		name: "Judith Butler",
		left: 4,
		right: 0,
	},
];

// One mutex per fork; the `()` payload means the lock itself is the resource.
static FORKS: [Mutex<()>; 5] = [
	Mutex::new(()),
	Mutex::new(()),
	Mutex::new(()),
	Mutex::new(()),
	Mutex::new(()),
];

// A diner in the dining-philosophers demo.
struct Philosopher {
	name: &'static str,
	left: usize,  // index of the left fork in FORKS
	right: usize, // index of the right fork in FORKS
}
+
+impl Philosopher {
+ fn cycle(&self) {
+ let key = ThreadKey::get().unwrap();
+ thread::sleep(Duration::from_secs(1));
+
+ // safety: no philosopher asks for the same fork twice
+ let forks = [&FORKS[self.left], &FORKS[self.right]];
+ let forks = unsafe { collection::RefLockCollection::new_unchecked(&forks) };
+ let forks = forks.lock(key);
+ println!("{} is eating...", self.name);
+ thread::sleep(Duration::from_secs(1));
+ println!("{} is done eating", self.name);
+ drop(forks);
+ }
+}
+
+fn main() {
+ let handles: Vec<_> = PHILOSOPHERS
+ .iter()
+ .map(|philosopher| thread::spawn(move || philosopher.cycle()))
+ // The `collect` is absolutely necessary, because we're using lazy
+ // iterators. If `collect` isn't used, then the thread won't spawn
+ // until we try to join on it.
+ .collect();
+
+ for handle in handles {
+ _ = handle.join();
+ }
+}
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100755
index 0000000..a49072a
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,3 @@
+edition = "2021"
+hard_tabs = true
+newline_style = "Unix" \ No newline at end of file
diff --git a/src/futex.rs b/src/futex.rs
new file mode 100755
index 0000000..ad74f11
--- /dev/null
+++ b/src/futex.rs
@@ -0,0 +1,535 @@
+use core::sync::atomic::Ordering;
+
+use cfg_if::cfg_if;
+
+cfg_if! {
+ if #[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ //all(target_os = "emscripten", target_feature = "atomics"),
+ //target_os = "freebsd",
+ //target_os = "openbsd",
+ //target_os = "dragonfly",
+ //target_os = "fuchsia",
+ ))] {
+ mod linux;
+ use linux::{Futex, Primitive, SmallFutex, SmallPrimitive};
+ } else if #[cfg(all(target_os = "windows", not(target_vendor = "win7")))] {
+ mod windows;
+ use windows::Futex;
+ }
+}
+
+const SPIN_COUNT: u32 = 100;
+
+const UNLOCKED: SmallPrimitive = 0;
+const LOCKED: SmallPrimitive = 1; // locked, no other threads waiting
+const CONTENDED: SmallPrimitive = 2; // locked, and other threads waiting (contended)
+
+pub struct Mutex(SmallFutex);
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Self {
+ Self(Futex::new(UNLOCKED))
+ }
+
+ /// Locks the mutex
+ ///
+ /// # Safety
+ ///
+ /// UB occurs if the mutex is already locked by the current thread and the
+ /// `unsafe_lock` feature is enabled.
+ #[inline]
+ pub unsafe fn lock(&self) {
+ if self
+ .0
+ .compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
+ .is_err()
+ {
+ self.lock_contended();
+ }
+ }
+
+ /// If the mutex is unlocked, it is locked, and this function returns
+ /// `true'. Otherwise, `false` is returned.
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ self.0
+ .compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ }
+
+ /// Unlocks the mutex
+ ///
+ /// # Safety
+ ///
+ /// UB occurs if the mutex is already unlocked or if it has been locked on
+ /// a different thread.
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ if self.0.swap(UNLOCKED, Ordering::Release) == CONTENDED {
+ // We only wake up one thread. When that thread locks the mutex, it
+ // will mark the mutex as CONTENDED (see lock_contended above),
+ // which makes sure that any other waiting threads will also be
+ // woken up eventually.
+ self.0.wake();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn is_locked(&self) -> bool {
+ self.0.load(Ordering::Relaxed) == UNLOCKED
+ }
+
+ #[cold]
+ fn lock_contended(&self) {
+ // Spin first to speed things up if the lock is released quickly.
+ let mut state = self.spin();
+
+ // If it's unlocked now, attempt to take the lock
+ // without marking it as contended.
+ if state == UNLOCKED {
+ match self
+ .0
+ .compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
+ {
+ Ok(_) => return, // Locked!
+ Err(s) => state = s,
+ }
+ }
+
+ loop {
+ // Put the lock in contended state.
+ // We avoid an unnecessary write if it as already set to CONTENDED,
+ // to be friendlier for the caches.
+ if state != CONTENDED && self.0.swap(CONTENDED, Ordering::Acquire) == UNLOCKED {
+ // We changed it from UNLOCKED to CONTENDED, so we just successfully locked it.
+ return;
+ }
+
+ // Wait for the futex to change state, assuming it is still CONTENDED.
+ self.0.wait(CONTENDED);
+
+ // Spin again after waking up.
+ state = self.spin();
+ }
+ }
+
+ fn spin(&self) -> SmallPrimitive {
+ let mut spin = SPIN_COUNT;
+ loop {
+ // We only use `load` (and not `swap` or `compare_exchange`)
+ // while spinning, to be easier on the caches.
+ let state = self.0.load(Ordering::Acquire);
+
+ // We stop spinning when the mutex is UNLOCKED,
+ // but also when it's CONTENDED.
+ if state != LOCKED || spin == 0 {
+ return state;
+ }
+
+ core::hint::spin_loop();
+ spin -= 1;
+ }
+ }
+}
+
/// A readers-writer lock built on two futex words.
pub struct RwLock {
	// The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
	// Bits 0..30:
	//   0: Unlocked
	//   1..=0x3FFF_FFFE: Locked by N readers
	//   0x3FFF_FFFF: Write locked
	// Bit 30: Readers are waiting on this futex.
	// Bit 31: Writers are waiting on the writer_notify futex.
	state: Futex,
	// The 'condition variable' to notify writers through.
	// Incremented on every signal.
	writer_notify: Futex,
}

// Amount the counter changes per reader.
const READ_LOCKED: Primitive = 1;
// Mask selecting the reader-count bits (bits 0..30).
const MASK: Primitive = (1 << 30) - 1;
// A saturated counter marks the write-locked state.
const WRITE_LOCKED: Primitive = MASK;
// Adding this converts a write-locked state into a one-reader state.
const DOWNGRADE: Primitive = READ_LOCKED.wrapping_sub(WRITE_LOCKED); // READ_LOCKED - WRITE_LOCKED
const MAX_READERS: Primitive = MASK - 1;
const READERS_WAITING: Primitive = 1 << 30;
const WRITERS_WAITING: Primitive = 1 << 31;

/// Returns `true` when neither readers nor a writer hold the lock.
#[inline]
fn is_unlocked(state: Primitive) -> bool {
	state & MASK == 0
}

/// Returns `true` when a writer holds the lock (counter saturated).
#[inline]
fn is_write_locked(state: Primitive) -> bool {
	state & MASK == WRITE_LOCKED
}

/// Returns `true` when the readers-waiting flag (bit 30) is set.
#[inline]
fn has_readers_waiting(state: Primitive) -> bool {
	state & READERS_WAITING != 0
}

/// Returns `true` when the writers-waiting flag (bit 31) is set.
#[inline]
fn has_writers_waiting(state: Primitive) -> bool {
	state & WRITERS_WAITING != 0
}

/// Returns `true` when a new reader may acquire the lock immediately.
#[inline]
fn is_read_lockable(state: Primitive) -> bool {
	// This also returns false if the counter could overflow if we tried to read lock it.
	//
	// We don't allow read-locking if there's readers waiting, even if the lock is unlocked
	// and there's no writers waiting. The only situation when this happens is after unlocking,
	// at which point the unlocking thread might be waking up writers, which have priority over readers.
	// The unlocking thread will clear the readers waiting bit and wake up readers, if necessary.
	state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state)
}

/// Like `is_read_lockable`, but for a reader that was just woken up.
#[inline]
fn is_read_lockable_after_wakeup(state: Primitive) -> bool {
	// We make a special case for checking if we can read-lock _after_ a reader thread that went to
	// sleep has been woken up by a call to `downgrade`.
	//
	// `downgrade` will wake up all readers and place the lock in read mode. Thus, there should be
	// no readers waiting and the lock should be read-locked (not write-locked or unlocked).
	//
	// Note that we do not check if any writers are waiting. This is because a call to `downgrade`
	// implies that the caller wants other readers to read the value protected by the lock. If we
	// did not allow readers to acquire the lock before writers after a `downgrade`, then only the
	// original writer would be able to read the value, thus defeating the purpose of `downgrade`.
	state & MASK < MAX_READERS
		&& !has_readers_waiting(state)
		&& !is_write_locked(state)
		&& !is_unlocked(state)
}

/// Returns `true` when the reader counter cannot grow any further.
#[inline]
fn has_reached_max_readers(state: Primitive) -> bool {
	state & MASK == MAX_READERS
}
+
impl RwLock {
	/// Creates a new, unlocked `RwLock`.
	#[inline]
	pub const fn new() -> Self {
		Self {
			state: Futex::new(0),
			writer_notify: Futex::new(0),
		}
	}

	/// Attempts to acquire a read lock without blocking.
	/// Returns `true` on success.
	#[inline]
	pub fn try_read(&self) -> bool {
		self.state
			.fetch_update(Ordering::Acquire, Ordering::Relaxed, |s| {
				is_read_lockable(s).then(|| s + READ_LOCKED)
			})
			.is_ok()
	}

	/// Acquires a read lock, blocking until it is available.
	#[inline]
	pub fn read(&self) {
		let state = self.state.load(Ordering::Relaxed);
		if !is_read_lockable(state)
			|| self
				.state
				.compare_exchange_weak(
					state,
					state + READ_LOCKED,
					Ordering::Acquire,
					Ordering::Relaxed,
				)
				.is_err()
		{
			self.read_contended();
		}
	}

	/// # Safety
	///
	/// The `RwLock` must be read-locked (N readers) in order to call this.
	#[inline]
	pub unsafe fn read_unlock(&self) {
		let state = self.state.fetch_sub(READ_LOCKED, Ordering::Release) - READ_LOCKED;

		// It's impossible for a reader to be waiting on a read-locked RwLock,
		// except if there is also a writer waiting.
		debug_assert!(!has_readers_waiting(state) || has_writers_waiting(state));

		// Wake up a writer if we were the last reader and there's a writer waiting.
		if is_unlocked(state) && has_writers_waiting(state) {
			self.wake_writer_or_readers(state);
		}
	}

	// Slow path of `read`: spin, then sleep on the state futex until the
	// lock becomes read-lockable.
	#[cold]
	fn read_contended(&self) {
		let mut has_slept = false;
		let mut state = self.spin_read();

		loop {
			// If we have just been woken up, first check for a `downgrade` call.
			// Otherwise, if we can read-lock it, lock it.
			if (has_slept && is_read_lockable_after_wakeup(state)) || is_read_lockable(state) {
				match self.state.compare_exchange_weak(
					state,
					state + READ_LOCKED,
					Ordering::Acquire,
					Ordering::Relaxed,
				) {
					Ok(_) => return, // Locked!
					Err(s) => {
						state = s;
						continue;
					}
				}
			}

			// Check for overflow.
			assert!(
				!has_reached_max_readers(state),
				"too many active read locks on RwLock"
			);

			// Make sure the readers waiting bit is set before we go to sleep.
			if !has_readers_waiting(state) {
				if let Err(s) = self.state.compare_exchange(
					state,
					state | READERS_WAITING,
					Ordering::Relaxed,
					Ordering::Relaxed,
				) {
					state = s;
					continue;
				}
			}

			// Wait for the state to change.
			self.state.wait(state | READERS_WAITING);
			has_slept = true;

			// Spin again after waking up.
			state = self.spin_read();
		}
	}

	/// Attempts to acquire the write lock without blocking.
	/// Returns `true` on success.
	#[inline]
	pub fn try_write(&self) -> bool {
		self.state
			.fetch_update(Ordering::Acquire, Ordering::Relaxed, |s| {
				is_unlocked(s).then(|| s + WRITE_LOCKED)
			})
			.is_ok()
	}

	/// Acquires the write lock, blocking until it is available.
	#[inline]
	pub fn write(&self) {
		if self
			.state
			.compare_exchange_weak(0, WRITE_LOCKED, Ordering::Acquire, Ordering::Relaxed)
			.is_err()
		{
			self.write_contended();
		}
	}

	/// # Safety
	///
	/// The `RwLock` must be write-locked (single writer) in order to call this.
	#[inline]
	pub unsafe fn write_unlock(&self) {
		let state = self.state.fetch_sub(WRITE_LOCKED, Ordering::Release) - WRITE_LOCKED;

		debug_assert!(is_unlocked(state));

		if has_writers_waiting(state) || has_readers_waiting(state) {
			self.wake_writer_or_readers(state);
		}
	}

	/// Atomically converts a write lock into a single read lock.
	///
	/// # Safety
	///
	/// The `RwLock` must be write-locked (single writer) in order to call this.
	#[inline]
	pub unsafe fn downgrade(&self) {
		// Removes all write bits and adds a single read bit.
		let state = self.state.fetch_add(DOWNGRADE, Ordering::Release);
		debug_assert!(
			is_write_locked(state),
			"RwLock must be write locked to call `downgrade`"
		);

		if has_readers_waiting(state) {
			// Since we had the exclusive lock, nobody else can unset this bit.
			self.state.fetch_sub(READERS_WAITING, Ordering::Relaxed);
			self.state.wake_all();
		}
	}

	// Slow path of `write`: spin, then sleep on `writer_notify` until the
	// lock can be claimed exclusively.
	#[cold]
	fn write_contended(&self) {
		let mut state = self.spin_write();

		let mut other_writers_waiting = 0;

		loop {
			// If it's unlocked, we try to lock it.
			if is_unlocked(state) {
				match self.state.compare_exchange_weak(
					state,
					state | WRITE_LOCKED | other_writers_waiting,
					Ordering::Acquire,
					Ordering::Relaxed,
				) {
					Ok(_) => return, // Locked!
					Err(s) => {
						state = s;
						continue;
					}
				}
			}

			// Set the waiting bit indicating that we're waiting on it.
			if !has_writers_waiting(state) {
				if let Err(s) = self.state.compare_exchange(
					state,
					state | WRITERS_WAITING,
					Ordering::Relaxed,
					Ordering::Relaxed,
				) {
					state = s;
					continue;
				}
			}

			// Other writers might be waiting now too, so we should make sure
			// we keep that bit on once we manage lock it.
			other_writers_waiting = WRITERS_WAITING;

			// Examine the notification counter before we check if `state` has changed,
			// to make sure we don't miss any notifications.
			let seq = self.writer_notify.load(Ordering::Acquire);

			// Don't go to sleep if the lock has become available,
			// or if the writers waiting bit is no longer set.
			state = self.state.load(Ordering::Relaxed);
			if is_unlocked(state) || !has_writers_waiting(state) {
				continue;
			}

			// Wait for the state to change.
			self.writer_notify.wait(seq);

			// Spin again after waking up.
			state = self.spin_write();
		}
	}

	/// Wakes up waiting threads after unlocking.
	///
	/// If both are waiting, this will wake up only one writer, but will fall
	/// back to waking up readers if there was no writer to wake up.
	#[cold]
	fn wake_writer_or_readers(&self, mut state: Primitive) {
		assert!(is_unlocked(state));

		// The readers waiting bit might be turned on at any point now,
		// since readers will block when there's anything waiting.
		// Writers will just lock the lock though, regardless of the waiting bits,
		// so we don't have to worry about the writer waiting bit.
		//
		// If the lock gets locked in the meantime, we don't have to do
		// anything, because then the thread that locked the lock will take
		// care of waking up waiters when it unlocks.

		// If only writers are waiting, wake one of them up.
		if state == WRITERS_WAITING {
			match self
				.state
				.compare_exchange(state, 0, Ordering::Relaxed, Ordering::Relaxed)
			{
				Ok(_) => {
					self.wake_writer();
					return;
				}
				Err(s) => {
					// Maybe some readers are now waiting too. So, continue to the next `if`.
					state = s;
				}
			}
		}

		// If both writers and readers are waiting, leave the readers waiting
		// and only wake up one writer.
		if state == READERS_WAITING + WRITERS_WAITING {
			if self
				.state
				.compare_exchange(state, READERS_WAITING, Ordering::Relaxed, Ordering::Relaxed)
				.is_err()
			{
				// The lock got locked. Not our problem anymore.
				return;
			}
			if self.wake_writer() {
				return;
			}
			// No writers were actually blocked on futex_wait, so we continue
			// to wake up readers instead, since we can't be sure if we notified a writer.
			state = READERS_WAITING;
		}

		// If readers are waiting, wake them all up.
		if state == READERS_WAITING
			&& self
				.state
				.compare_exchange(state, 0, Ordering::Relaxed, Ordering::Relaxed)
				.is_ok()
		{
			self.state.wake_all();
		}
	}

	/// This wakes one writer and returns true if we woke up a writer that was
	/// blocked on futex_wait.
	///
	/// If this returns false, it might still be the case that we notified a
	/// writer that was about to go to sleep.
	fn wake_writer(&self) -> bool {
		self.writer_notify.fetch_add(1, Ordering::Release);
		self.writer_notify.wake()
		// Note that FreeBSD and DragonFlyBSD don't tell us whether they woke
		// up any threads or not, and always return `false` here. That still
		// results in correct behavior: it just means readers get woken up as
		// well in case both readers and writers were waiting.
	}

	/// Spin for a while, but stop directly at the given condition.
	#[inline]
	fn spin_until(&self, f: impl Fn(Primitive) -> bool) -> Primitive {
		let mut spin = SPIN_COUNT; // Chosen by fair dice roll.
		loop {
			let state = self.state.load(Ordering::Relaxed);
			if f(state) || spin == 0 {
				return state;
			}
			core::hint::spin_loop();
			spin -= 1;
		}
	}

	// Spin variant used by the write path.
	#[inline]
	fn spin_write(&self) -> Primitive {
		// Stop spinning when it's unlocked or when there's waiting writers, to keep things somewhat fair.
		self.spin_until(|state| is_unlocked(state) || has_writers_waiting(state))
	}

	// Spin variant used by the read path.
	#[inline]
	fn spin_read(&self) -> Primitive {
		// Stop spinning when it's unlocked or read locked, or when there's waiting threads.
		self.spin_until(|state| {
			!is_write_locked(state) || has_readers_waiting(state) || has_writers_waiting(state)
		})
	}
}
diff --git a/src/futex/linux.rs b/src/futex/linux.rs
new file mode 100755
index 0000000..80ca5a6
--- /dev/null
+++ b/src/futex/linux.rs
@@ -0,0 +1,61 @@
+use core::ops::Deref;
+use core::sync::atomic::{AtomicU32, Ordering};
+
+use libc::{syscall, SYS_futex};
+
/// A 32-bit futex word. `#[repr(C)]` guarantees the atomic sits at offset 0,
/// so a pointer to `self.0` is a valid futex address for the kernel.
#[repr(C)]
pub struct Futex(AtomicU32);

pub type Atomic = AtomicU32;
pub type Primitive = u32;

// Linux futexes are always 32-bit, so the "small" futex used by `Mutex`
// is simply the full-size one.
pub type SmallFutex = Futex;
pub type SmallAtomic = Atomic;
pub type SmallPrimitive = Primitive;
+
+impl Futex {
+ #[inline]
+ pub const fn new(initial_value: u32) -> Self {
+ Self(AtomicU32::new(initial_value))
+ }
+
+ #[inline]
+ pub fn wait(&self, expected_start_value: u32) {
+ unsafe {
+ syscall(
+ SYS_futex,
+ core::ptr::from_ref(&self.0),
+ libc::FUTEX_WAIT_BITSET | libc::FUTEX_PRIVATE_FLAG,
+ expected_start_value,
+ core::ptr::null::<()>(),
+ core::ptr::null::<u32>(), // This argument is unused for FUTEX_WAIT_BITSET.
+ !0u32, // A full bitmask, to make it behave like a regular FUTEX_WAIT.
+ );
+ }
+ }
+
+ #[inline]
+ pub fn wake(&self) -> bool {
+ let ptr = &self.0 as *const AtomicU32;
+ const OP: libc::c_int = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG;
+ unsafe { libc::syscall(libc::SYS_futex, ptr, OP, 1) > 0 }
+ }
+
+ #[inline]
+ pub fn wake_all(&self) {
+ let ptr = &raw const self.0;
+ let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG;
+ unsafe {
+ syscall(libc::SYS_futex, ptr, op, i32::MAX);
+ }
+ }
+}
+
+impl Deref for Futex {
+ type Target = Atomic;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
diff --git a/src/futex/windows.rs b/src/futex/windows.rs
new file mode 100755
index 0000000..153d9ef
--- /dev/null
+++ b/src/futex/windows.rs
@@ -0,0 +1,114 @@
+use core::ffi::c_void;
+
+#[cfg(not(target_vendor = "win7"))]
+// Use raw-dylib to import synchronization functions to workaround issues with the older mingw import library.
+#[cfg_attr(
+ target_arch = "x86",
+ link(
+ name = "api-ms-win-core-synch-l1-2-0",
+ kind = "raw-dylib",
+ import_name_type = "undecorated"
+ )
+)]
+#[cfg_attr(
+ not(target_arch = "x86"),
+ link(name = "api-ms-win-core-synch-l1-2-0", kind = "raw-dylib")
+)]
+extern "system" {
+ type BOOL = i32;
+
+ pub const TRUE: BOOL = 1;
+ pub const INFINITE: u32 = 4294967295;
+
+ pub fn WaitOnAddress(
+ address: *const c_void,
+ compareaddress: *const c_void,
+ addresssize: usize,
+ dwmilliseconds: u32,
+ ) -> BOOL;
+ pub fn WakeByAddressSingle(address: *const c_void);
+ pub fn WakeByAddressAll(address: *const c_void);
+}
+
+pub struct Futex(AtomicU32);
+pub type Primitive = u32;
+
+impl Futex {
+ #[inline]
+ pub const fn new(initial_value: Primitive) -> Self {
+ Self(AtomicU32::new(initial_value))
+ }
+
+ #[inline]
+ pub fn wait(&self, expected_start_value: Primitive) {
+ const SIZE: u32 = core::mem::size_of::<Atomic>();
+ let addr = core::ptr::from_ref(&self.0).cast::<c_void>();
+ let compare_addr = ptr::addr_of!(compare).cast::<c_void>();
+ WaitOnAddress(addr, expected_start_value, SIZE, INFINITE);
+ }
+
+ #[inline]
+ pub fn wake(&self) -> bool {
+ WakeByAddressSingle(core::ptr::from_ref(&self.0).cast::<c_void>());
+ false
+ }
+
+ #[inline]
+ pub fn wake_all(&self) {
+ unsafe {
+ let addr = core::ptr::from_ref(address).cast::<c_void>();
+ WakeByAddressAll(addr);
+ }
+ }
+}
+
+impl Deref for Futex {
+ type Target = Atomic;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+pub struct SmallFutex(AtomicU8);
+pub type SmallAtomic = AtomicU8;
+pub type SmallPrimitive = u8;
+
+impl SmallFutex {
+ #[inline]
+ pub const fn new(initial_value: SmallPrimitive) -> Self {
+ Self(AtomicU32::new(initial_value))
+ }
+
+ #[inline]
+ pub fn wait(&self, expected_start_value: SmallPrimitive) {
+ const SIZE: u32 = core::mem::size_of::<SmallAtomic>();
+ let addr = core::ptr::from_ref(&self.0).cast::<c_void>();
+ let compare_addr = ptr::addr_of!(compare).cast::<c_void>();
+ WaitOnAddress(addr, expected_start_value, SIZE, INFINITE);
+ }
+
+ #[inline]
+ pub fn wake(&self) -> bool {
+ WakeByAddressSingle(core::ptr::from_ref(&self.0).cast::<c_void>());
+ false
+ }
+
+ #[inline]
+ pub fn wake_all(&self) {
+ unsafe {
+ let addr = core::ptr::from_ref(address).cast::<c_void>();
+ WakeByAddressAll(addr);
+ }
+ }
+}
+
+impl Deref for SmallFutex {
+ type Target = SmallAtomic;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
diff --git a/src/lazy_box.rs b/src/lazy_box.rs
new file mode 100755
index 0000000..0fd7fd9
--- /dev/null
+++ b/src/lazy_box.rs
@@ -0,0 +1,97 @@
+// This is used to wrap pthread {Mutex, Condvar, RwLock} in.
+
+use core::marker::PhantomData;
+use core::ops::{Deref, DerefMut};
+use core::ptr::null_mut;
+use core::sync::atomic::{AtomicPtr, Ordering};
+
+use alloc::boxed::Box;
+
/// A pointer that allocates and initializes its contents on first access,
/// so that it can still be constructed in a `const fn`.
pub struct LazyBox<T: LazyInit> {
    ptr: AtomicPtr<T>,
    _phantom: PhantomData<T>,
}

pub trait LazyInit {
    /// Produces the value that will be moved into the freshly allocated box.
    ///
    /// This may run more than once per `LazyBox`: several threads can race to
    /// initialize concurrently, each building its own box. Every box except
    /// the winner is handed to `cancel_init` immediately afterwards.
    fn init() -> Box<Self>;

    /// Disposes of a surplus box whose `init()` lost the race.
    ///
    /// The default implementation forwards to `destroy()`.
    fn cancel_init(x: Box<Self>) {
        Self::destroy(x);
    }

    /// Destroys a used box.
    ///
    /// The default implementation simply drops it.
    fn destroy(_: Box<Self>) {}
}

impl<T: LazyInit> LazyBox<T> {
    #[inline]
    pub const fn new() -> Self {
        Self {
            ptr: AtomicPtr::new(null_mut()),
            _phantom: PhantomData,
        }
    }

    /// Returns the pointer to the contents, allocating them first if needed.
    #[inline]
    fn get_pointer(&self) -> *mut T {
        match self.ptr.load(Ordering::Acquire) {
            p if p.is_null() => self.initialize(),
            p => p,
        }
    }

    /// Slow path: allocate a box and try to install it.
    #[cold]
    fn initialize(&self) -> *mut T {
        let fresh = Box::into_raw(T::init());
        let race =
            self.ptr
                .compare_exchange(null_mut(), fresh, Ordering::AcqRel, Ordering::Acquire);
        match race {
            Err(winner) => {
                // Another thread installed its box first; reclaim and discard
                // ours, then use theirs.
                T::cancel_init(unsafe { Box::from_raw(fresh) });
                winner
            }
            Ok(_) => fresh,
        }
    }
}

impl<T: LazyInit> Deref for LazyBox<T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.get_pointer() }
    }
}

impl<T: LazyInit> DerefMut for LazyBox<T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.get_pointer() }
    }
}

impl<T: LazyInit> Drop for LazyBox<T> {
    fn drop(&mut self) {
        let raw = *self.ptr.get_mut();
        if !raw.is_null() {
            // SAFETY: a non-null pointer here was produced by `Box::into_raw`
            // in `initialize` and is owned exclusively by this `LazyBox`.
            T::destroy(unsafe { Box::from_raw(raw) });
        }
    }
}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100755
index 0000000..b26d0b1
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,187 @@
+#![no_std]
+#![forbid(unsafe_op_in_unsafe_fn)]
+
+extern crate alloc;
+
+// tier 1: linux, windows, wasm w/o atomics
+// tier 2: mac os, android, ios, fuschia, illumos, freebsd, netbsd, solaris, redox, uefi, zkvm, embedded
+// not supported: dragonfly, wasm w/ atomics, hermit, teeos, sgx, solid, xous
+
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ all(target_os = "windows", not(target_vendor = "win7")),
+ target_os = "linux",
+ target_os = "android",
+ //target_os = "freebsd",
+ //target_os = "openbsd",
+ //target_os = "dragonfly",
+ //all(target_family = "wasm", target_feature = "atomics"),
+ //target_os = "hermit",
+ ))] {
+ mod futex;
+ } else if #[cfg(any(
+ target_os = "dragonfly",
+ all(target_family = "wasm", target_feature = "atomics"),
+ target_os = "hermit"
+ ))] {
+ // merge with above when implemented
+ //} else if #[cfg(target_os = "fuchsia")] {
+ // mod fuchsia;
+ // mod unix;
+ } else if #[cfg(any(
+ target_family = "unix",
+ ))] {
+ extern crate alloc;
+
+ mod lazy_box;
+
+ mod pthread;
+ //mod queue;
+ } else if #[cfg(target_os = "teeos")] {
+ extern crate alloc;
+
+ mod lazy_box;
+
+ mod pthread;
+ //mod teeos;
+ } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] {
+ mod windows7;
+ //mod queue;
+ } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
+ //mod sgx;
+ //mod queue;
+ } else if #[cfg(target_os = "solid_asp3")] {
+ //mod itron;
+ //mod solid;
+ } else if #[cfg(target_os = "xous")] {
+ //mod xous;
+ //mod queue;
+ } else if #[cfg(any(
+ target_family = "wasm",
+ target_os = "uefi",
+ target_os = "zkvm"
+ ))] {
+ mod no_threads;
+ } else if #[cfg(target_has_atomic = "8")] {
+ mod spin;
+ }
+}
+
+// Selects which platform module's `Mutex` is re-exported at the crate root.
+// The branch structure mirrors the module-declaration `cfg_if` above; a
+// branch without a `pub use` is a platform whose backend is not written yet.
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ all(target_os = "windows", not(target_vendor = "win7")),
+ target_os = "linux",
+ target_os = "android",
+ //target_os = "freebsd",
+ //target_os = "openbsd",
+ //target_os = "dragonfly",
+ //all(target_family = "wasm", target_feature = "atomics"),
+ //target_os = "hermit",
+ ))] {
+ pub use futex::Mutex;
+ } else if #[cfg(any(
+ target_os = "dragonfly",
+ all(target_family = "wasm", target_feature = "atomics"),
+ target_os = "hermit"
+ ))] {
+ // merge with above when implemented
+ //} else if #[cfg(target_os = "fuchsia")] {
+ // mod fuchsia;
+ // pub use fuchsia::Mutex;
+ } else if #[cfg(any(
+ target_family = "unix",
+ //target_os = "teeos",
+ ))] {
+ pub use pthread::Mutex;
+ } else if #[cfg(target_os = "teeos")] {
+ // merge with above when implemented
+ } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] {
+ pub use windows7::Mutex;
+ } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
+ //pub use sgx::Mutex;
+ } else if #[cfg(target_os = "solid_asp3")] {
+ //pub use itron::Mutex;
+ } else if #[cfg(target_os = "xous")] {
+ //pub use xous::Mutex;
+ } else if #[cfg(any(
+ target_family = "wasm",
+ target_os = "uefi",
+ target_os = "zkvm"
+ ))] {
+ pub use no_threads::Mutex;
+ } else if #[cfg(all(target_os = "none", target_has_atomic = "8"))] {
+ // NOTE(review): this branch additionally requires `target_os = "none"`,
+ // but the module-declaration block above gates `mod spin` only on
+ // `target_has_atomic = "8"` — on targets matching the latter but not
+ // the former, `spin` is compiled yet no `Mutex` is exported. Confirm
+ // which gate is intended.
+ pub use spin::Mutex;
+ }
+}
+
+// Selects which platform module's `RwLock` is re-exported at the crate root.
+// NOTE(review): the unix / win7 branch has its `pub use queue::RwLock;`
+// commented out, so those platforms currently export no `RwLock` at all —
+// confirm this is intentional until the queue-based lock lands.
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ all(target_os = "windows", not(target_vendor = "win7")),
+ target_os = "linux",
+ target_os = "android",
+ //target_os = "freebsd",
+ //target_os = "openbsd",
+ //target_os = "dragonfly",
+ //target_os = "fuchsia",
+ //all(target_family = "wasm", target_feature = "atomics"),
+ //target_os = "hermit",
+ ))] {
+ pub use futex::RwLock;
+ } else if #[cfg(any(
+ all(target_family = "wasm", target_feature = "atomics"),
+ target_os = "hermit"
+ ))] {
+ // merge with above when implemented
+ } else if #[cfg(any(
+ target_family = "unix",
+ all(target_os = "windows", target_vendor = "win7"),
+ //all(target_vendor = "fortanix", target_env = "sgx"),
+ //target_os = "xous",
+ ))] {
+ //pub use queue::RwLock;
+ } else if #[cfg(any(
+ all(target_vendor = "fortanix", target_env = "sgx"),
+ target_os = "xous",)
+ )] {
+ // merge with above when implemented
+ } else if #[cfg(target_os = "solid_asp3")] {
+ //pub use solid::RwLock;
+ } else if #[cfg(target_os = "teeos")] {
+ //pub use teeos::RwLock;
+ } else if #[cfg(any(
+ target_family = "wasm",
+ target_os = "uefi",
+ target_os = "zkvm"
+ ))] {
+ pub use no_threads::RwLock;
+ } else if #[cfg(all(target_os = "none", target_has_atomic = "8"))] {
+ //pub use spin::RwLock;
+ }
+}
+
+// Adapter implementing `lock_api::RawMutex` on top of this crate's platform
+// `Mutex`, so it can back `lock_api::Mutex<T>` guards.
+unsafe impl lock_api::RawMutex for Mutex {
+ #[allow(clippy::declare_interior_mutable_const)]
+ const INIT: Self = Self::new();
+
+ // Guards may not be sent to another thread.
+ type GuardMarker = lock_api::GuardNoSend;
+
+ #[no_panic::no_panic]
+ fn lock(&self) {
+ // Calls the *inherent* `unsafe fn lock`; inherent methods take
+ // precedence over trait methods, so this is not recursion.
+ // safety: unsafe_lock is disabled
+ // NOTE(review): the safety claim above only holds while the
+ // `unsafe_lock` feature is off — with it enabled, a safe re-lock
+ // through this trait method would be UB. Confirm the feature gating.
+ unsafe { self.lock() }
+ }
+
+ #[no_panic::no_panic]
+ fn try_lock(&self) -> bool {
+ // Delegates to the inherent `unsafe fn try_lock`.
+ unsafe { self.try_lock() }
+ }
+
+ #[no_panic::no_panic]
+ unsafe fn unlock(&self) {
+ // Forwarded from the caller's `RawMutex::unlock` contract.
+ unsafe { self.unlock() }
+ }
+
+ fn is_locked(&self) -> bool {
+ // NOTE(review): on some backends (pthread, win7) the inherent
+ // `is_locked` briefly acquires and releases the lock — confirm that
+ // is acceptable for a safe trait method here.
+ unsafe { self.is_locked() }
+ }
+}
diff --git a/src/no_threads.rs b/src/no_threads.rs
new file mode 100755
index 0000000..f831f5f
--- /dev/null
+++ b/src/no_threads.rs
@@ -0,0 +1,122 @@
+use core::cell::Cell;
+
pub struct Mutex {
    // This platform has no threads, so plain interior mutability suffices.
    locked: Cell<bool>,
}

// SAFETY: this implementation is only compiled for single-threaded targets,
// so the lock state is never actually shared between threads.
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

impl Mutex {
    #[inline]
    pub const fn new() -> Mutex {
        Mutex {
            locked: Cell::new(false),
        }
    }

    /// Locks the mutex
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already locked by the current thread and the
    /// `unsafe_lock` feature is enabled.
    #[inline]
    pub unsafe fn lock(&self) {
        let already_held = self.locked.replace(true);
        if !already_held {
            return;
        }

        if cfg!(feature = "unsafe_lock") {
            // SAFETY: with `unsafe_lock` enabled, re-locking is declared UB,
            // so correct callers never reach this branch.
            unsafe { core::hint::unreachable_unchecked() };
        } else {
            panic!("deadlock on a platform with no threads");
        }
    }

    /// If the mutex is unlocked, it is locked, and this function returns
    /// `true`. Otherwise, `false` is returned.
    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        !self.locked.replace(true)
    }

    /// Unlocks the mutex
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already unlocked or if it has been locked on
    /// a different thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        self.locked.set(false);
    }

    /// Reports whether the mutex is currently held.
    #[inline]
    pub unsafe fn is_locked(&self) -> bool {
        self.locked.get()
    }
}
+
pub struct RwLock {
    // This platform has no threads, so we can use a Cell here.
    // Number of active readers when >= 0; -1 means write-locked.
    mode: Cell<isize>,
}

unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {} // no threads on this platform

impl RwLock {
    #[inline]
    pub const fn new() -> RwLock {
        RwLock { mode: Cell::new(0) }
    }

    /// Acquires a shared (read) lock.
    ///
    /// # Safety
    ///
    /// UB occurs if the lock is write-locked and the `unsafe_lock` feature is
    /// enabled.
    #[inline]
    pub unsafe fn read(&self) {
        let m = self.mode.get();
        if m >= 0 {
            self.mode.set(m + 1);
        } else if cfg!(feature = "unsafe_lock") {
            // SAFETY: reading while write-locked is declared UB in this
            // configuration, so correct callers never reach this branch.
            unsafe { core::hint::unreachable_unchecked() };
        } else {
            // ROBUSTNESS FIX: the original hit `unreachable_unchecked` here
            // even with `unsafe_lock` disabled, unlike this file's `Mutex`,
            // which panics in that configuration.
            panic!("rwlock locked for writing");
        }
    }

    /// Tries to acquire a shared (read) lock; returns whether it succeeded.
    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        let m = self.mode.get();
        if m >= 0 {
            self.mode.set(m + 1);
            true
        } else {
            false
        }
    }

    /// Acquires the exclusive (write) lock.
    ///
    /// # Safety
    ///
    /// UB occurs if the lock is already locked (read or write) and the
    /// `unsafe_lock` feature is enabled.
    #[inline]
    pub unsafe fn write(&self) {
        // BUG FIX: the original stored `isize::MAX` for a write lock, but
        // `try_write` stores -1 and `write_unlock` asserts the mode was -1,
        // so a `write`/`write_unlock` pair always panicked and `try_read`
        // wrongly succeeded while write-locked. Use -1 so all paths agree.
        if self.mode.replace(-1) != 0 {
            if cfg!(feature = "unsafe_lock") {
                // SAFETY: double-locking is declared UB in this configuration.
                unsafe { core::hint::unreachable_unchecked() };
            } else {
                panic!("rwlock locked for reading");
            }
        }
    }

    /// Tries to acquire the exclusive (write) lock; returns whether it
    /// succeeded.
    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        if self.mode.get() == 0 {
            self.mode.set(-1);
            true
        } else {
            false
        }
    }

    /// Releases a shared lock.
    ///
    /// # Safety
    ///
    /// The caller must hold a shared lock.
    #[inline]
    pub unsafe fn read_unlock(&self) {
        self.mode.set(self.mode.get() - 1);
    }

    /// Releases the exclusive lock.
    ///
    /// # Safety
    ///
    /// The caller must hold the exclusive lock.
    #[inline]
    pub unsafe fn write_unlock(&self) {
        assert_eq!(self.mode.replace(0), -1);
    }
}
diff --git a/src/pthread.rs b/src/pthread.rs
new file mode 100755
index 0000000..b549e1d
--- /dev/null
+++ b/src/pthread.rs
@@ -0,0 +1,118 @@
+use core::cell::UnsafeCell;
+use core::ops::Deref;
+
+use alloc::boxed::Box;
+
+use libc::{
+ pthread_mutex_lock, pthread_mutex_t, pthread_mutex_trylock, pthread_mutex_unlock, EBUSY,
+ PTHREAD_MUTEX_INITIALIZER,
+};
+
+use crate::lazy_box::{LazyBox, LazyInit};
+
+struct RawMutex(UnsafeCell<pthread_mutex_t>);
+
+impl LazyInit for RawMutex {
+ fn init() -> Box<Self> {
+ // We need to box this no matter what, because copying a pthread mutex
+ // results in undocumented behavior.
+ let mutex = Box::new(Self(UnsafeCell::new(PTHREAD_MUTEX_INITIALIZER)));
+
+ if !cfg!(feature = "unsafe_lock") {
+ // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
+ // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
+ // try to re-lock it from the same thread when you already hold a lock
+ // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html).
+ // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
+ // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that
+ // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same
+ // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in
+ // a Mutex where re-locking is UB.
+ //
+ // In practice, glibc takes advantage of this undefined behavior to
+ // implement hardware lock elision, which uses hardware transactional
+ // memory to avoid acquiring the lock. While a transaction is in
+ // progress, the lock appears to be unlocked. This isn't a problem for
+ // other threads since the transactional memory will abort if a conflict
+ // is detected, however no abort is generated when re-locking from the
+ // same thread.
+ //
+ // Since locking the same mutex twice will result in two aliasing &mut
+ // references, we instead create the mutex with type
+ // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
+ // re-lock it from the same thread, thus avoiding undefined behavior.
+ unsafe {
+ let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+ libc::pthread_mutexattr_init(attr.as_mut_ptr());
+ let attr = PthreadMutexAttr(&mut attr);
+ libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL);
+ libc::pthread_mutex_init(mutex.0.get(), attr.0.as_ptr());
+ }
+ }
+
+ mutex
+ }
+}
+
+impl Deref for RawMutex {
+ type Target = UnsafeCell<pthread_mutex_t>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+pub struct Mutex(LazyBox<RawMutex>);
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Self {
+ Self(LazyBox::new())
+ }
+
+ /// Locks the mutex
+ ///
+ /// # Safety
+ ///
+ /// UB occurs if the mutex is already locked by the current thread and the
+ /// `unsafe_lock` feature is enabled.
+ #[inline]
+ pub unsafe fn lock(&self) {
+ // safety: the pointer is valid
+ pthread_mutex_lock(self.0.get());
+ }
+
+ /// If the mutex is unlocked, it is locked, and this function returns
+ /// `true'. Otherwise, `false` is returned.
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ // safety: the pointer is valid
+ unsafe { pthread_mutex_trylock(self.0.get()) == 0 }
+ }
+
+ /// Unlocks the mutex
+ ///
+ /// # Safety
+ ///
+ /// UB occurs if the mutex is already unlocked or if it has been locked on
+ /// a different thread.
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ // safety: the pointer is valid
+ pthread_mutex_unlock(self.0.get());
+ }
+
+ pub unsafe fn is_locked(&self) -> bool {
+ if self.try_lock() {
+ unsafe {
+ self.unlock();
+ }
+ false
+ } else {
+ true
+ }
+ }
+}
diff --git a/src/spin.rs b/src/spin.rs
new file mode 100755
index 0000000..c81a1e7
--- /dev/null
+++ b/src/spin.rs
@@ -0,0 +1,68 @@
+use core::sync::atomic::{AtomicBool, Ordering};
+
pub struct Mutex(AtomicBool);

unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

impl Mutex {
    #[inline]
    pub const fn new() -> Mutex {
        Mutex(AtomicBool::new(false))
    }

    /// Locks the mutex, spinning until it is acquired.
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already locked by the current thread and the
    /// `unsafe_lock` feature is enabled.
    #[inline]
    pub unsafe fn lock(&self) {
        loop {
            // `try_lock_weak` can fail to lock even if the spinlock is not
            // locked, but may be more efficient than `try_lock` when called
            // in a loop.
            if self.try_lock_weak() {
                break;
            }

            // BUG FIX: the original spun on `self.is_locked()`, which both
            // named a nonexistent field and called an `unsafe fn` without an
            // `unsafe` block (forbidden by `unsafe_op_in_unsafe_fn`). Spin on
            // plain relaxed loads instead, which also avoids bouncing the
            // cache line with failed compare-exchanges.
            while self.0.load(Ordering::Relaxed) {
                core::hint::spin_loop();
            }
        }
    }

    /// If the mutex is unlocked, it is locked, and this function returns
    /// `true`. Otherwise, `false` is returned.
    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        // The reason for using a strong compare_exchange is explained here:
        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
        // BUG FIX: the original used `self.lock`, but the tuple struct's only
        // field is `self.0`.
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    /// Unlocks the mutex
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already unlocked or if it has been locked on
    /// a different thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        // BUG FIX: the original called `self.locked.set(false)` — a `Cell`
        // method on a nonexistent field. Release ordering publishes the
        // critical section to the next acquirer.
        self.0.store(false, Ordering::Release);
    }

    /// Reports whether the mutex is currently held.
    #[inline]
    pub unsafe fn is_locked(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }

    /// Weak try-lock used by the spin loop; may fail spuriously.
    #[inline]
    fn try_lock_weak(&self) -> bool {
        self.0
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
}
diff --git a/src/windows7.rs b/src/windows7.rs
new file mode 100755
index 0000000..2007d05
--- /dev/null
+++ b/src/windows7.rs
@@ -0,0 +1,119 @@
+use core::cell::UnsafeCell;
+
+// ABI mirror of the Win32 `SRWLOCK` structure: a single pointer-sized field.
+// NOTE(review): hand-written binding — confirm the layout against the
+// Windows SDK headers rather than this comment.
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct SrwLock {
+ ptr: *mut core::ffi::c_void,
+}
+
+// An SRW lock is initialized by zero-filling it; a null pointer here.
+const SRWLOCK_INIT: SrwLock = SrwLock {
+ ptr: core::ptr::null_mut(),
+};
+
+// Win32 `BOOLEAN` (one byte); callers in this file treat non-zero as success.
+type Boolean = u8;
+
+// SRW lock entry points, linked directly from kernel32.
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn AcquireSRWLockShared(srwlock: *mut SrwLock);
+ pub fn AcquireSRWLockExclusive(srwlock: *mut SrwLock);
+ pub fn TryAcquireSRWLockShared(srwlock: *mut SrwLock) -> Boolean;
+ pub fn TryAcquireSRWLockExclusive(srwlock: *mut SrwLock) -> Boolean;
+ pub fn ReleaseSRWLockShared(srwlock: *mut SrwLock);
+ pub fn ReleaseSRWLockExclusive(srwlock: *mut SrwLock);
+}
+
+pub struct Mutex(UnsafeCell<SrwLock>);
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+impl Mutex {
+ #[inline]
+ pub const fn new() -> Self {
+ Self(UnsafeCell::new(SRWLOCK_INIT))
+ }
+
+ /// Locks the mutex
+ ///
+ /// # Safety
+ ///
+ /// UB occurs if the mutex is already locked by the current thread and the
+ /// `unsafe_lock` feature is enabled.
+ #[inline]
+ pub unsafe fn lock(&self) {
+ unsafe {
+ AcquireSRWLockExclusive(self.0.get());
+ }
+ }
+
+ /// If the mutex is unlocked, it is locked, and this function returns
+ /// `true'. Otherwise, `false` is returned.
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ unsafe { TryAcquireSRWLockExclusive(self.0.get()) != 0 }
+ }
+
+ /// Unlocks the mutex
+ ///
+ /// # Safety
+ ///
+ /// UB occurs if the mutex is already unlocked or if it has been locked on
+ /// a different thread.
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ unsafe {
+ ReleaseSRWLockExclusive(self.0.get());
+ }
+ }
+
+ pub unsafe fn is_locked(&self) -> bool {
+ if self.try_lock() {
+ unsafe {
+ self.unlock();
+ }
+ false
+ } else {
+ true
+ }
+ }
+}
+
+pub struct RwLock {
+ inner: UnsafeCell<c::SRWLOCK>,
+}
+
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {}
+
+impl RwLock {
+ #[inline]
+ pub const fn new() -> RwLock {
+ RwLock {
+ inner: UnsafeCell::new(c::SRWLOCK_INIT),
+ }
+ }
+ #[inline]
+ pub fn read(&self) {
+ unsafe { c::AcquireSRWLockShared(self.inner.get()) }
+ }
+ #[inline]
+ pub fn try_read(&self) -> bool {
+ unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 }
+ }
+ #[inline]
+ pub fn write(&self) {
+ unsafe { c::AcquireSRWLockExclusive(self.inner.get()) }
+ }
+ #[inline]
+ pub fn try_write(&self) -> bool {
+ unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 }
+ }
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ c::ReleaseSRWLockShared(self.inner.get())
+ }
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ c::ReleaseSRWLockExclusive(self.inner.get())
+ }
+}