kernel/sync/spin/lock.rs

use core::sync::atomic::{AtomicU64, Ordering};

use crate::sync::cache_padded::CachePadded;

// This is adapted from Rust's `futex_rwlock` in `unix`, without the `futex`
// implementation, since we don't have futexes here.
//
// Bits 0..30:
//   0: Unlocked
//   1..=0x3FFF_FFFE: Locked by N readers
//   0x3FFF_FFFF: Write locked
const UNLOCKED: u64 = 0;
const READ_LOCKED: u64 = 1;
const MASK: u64 = (1 << 30) - 1;
const WRITE_LOCKED: u64 = MASK;
const MAX_READERS: u64 = MASK - 1;

#[inline]
fn is_read_lockable(state: u64) -> bool {
    // Returns false if the lock is write-locked, or if the reader counter would
    // overflow into the write-locked value if we tried to read-lock it.
    //
    // The `futex_rwlock` this is based on also refuses to read-lock while readers
    // are waiting, to give priority to writers being woken by the unlocking thread.
    // This spin version has no waiting bits, so only the counter check remains.
    state & MASK < MAX_READERS
}

/// A raw spin lock, providing `read_lock`, `read_unlock`, `write_lock`, and `write_unlock`.
///
/// This raw lock is designed for the broader `RwLock` case, but used only through
/// `write_lock`/`write_unlock` it acts as a simple boolean lock, which is what `Mutex` uses.
///
/// This is an unsafe lock: it has no protection against deadlocks or multiple locking.
/// Safe wrappers are implemented by `Mutex`, `ReMutex`, and `RwLock`.
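///
/// A minimal usage sketch (illustrative only; the safe wrappers above are the
/// real consumers of this type):
///
/// ```ignore
/// let lock = Lock::new();
///
/// // Exclusive access, as `Mutex` uses it.
/// lock.write_lock();
/// // ... touch the protected data ...
/// // SAFETY: we are the single holder of the write lock.
/// unsafe { lock.write_unlock() };
///
/// // Shared access, as the read side of `RwLock` uses it.
/// lock.read_lock();
/// // ... read the protected data ...
/// // SAFETY: this thread currently holds a read lock.
/// unsafe { lock.read_unlock() };
/// ```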
pub(super) struct Lock {
    state: CachePadded<AtomicU64>,
}

impl Lock {
    pub const fn new() -> Self {
        Self {
            state: CachePadded::new(AtomicU64::new(UNLOCKED)),
        }
    }

    pub fn read_lock(&self) {
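        // Same idea as `write_lock` below: only retry the atomic update once the
        // state looks read-lockable again, spinning on plain loads in between.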
        while !self.try_read_lock() {
            loop {
                let state = self.state.load(Ordering::Relaxed);
                if is_read_lockable(state) {
                    break;
                }
                core::hint::spin_loop();
            }
        }
    }

    #[must_use]
    #[inline(always)]
    /// Try to read-lock the lock, returns `true` if successful
    pub fn try_read_lock(&self) -> bool {
        self.state
            .fetch_update(Ordering::Acquire, Ordering::Relaxed, |s| {
                is_read_lockable(s).then(|| s + READ_LOCKED)
            })
            .is_ok()
    }

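    /// SAFETY: the caller must ensure this thread currently holds a read lock
    ///         taken via `read_lock`/`try_read_lock`; unbalanced unlocks corrupt the state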
    pub unsafe fn read_unlock(&self) {
        self.state.fetch_sub(READ_LOCKED, Ordering::Release);
    }

    pub fn write_lock(&self) {
        // Only try to lock once, then spin until the lock looks free, then try again.
        // This reduces cache-line contention (`cache exclusion`) and improves performance.
        while !self.try_write_lock() {
            while self.state.load(Ordering::Relaxed) != UNLOCKED {
                core::hint::spin_loop();
            }
        }
    }

    #[must_use]
    #[inline(always)]
    /// Try to write-lock the lock, returns `true` if successful
    pub fn try_write_lock(&self) -> bool {
        self.state
            .compare_exchange_weak(UNLOCKED, WRITE_LOCKED, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    /// SAFETY: the caller must ensure that it is the only holder of the write lock;
    ///         multiple unlocks don't make sense for this `Lock` (see `super::remutex::ReMutex`)
    pub unsafe fn write_unlock(&self) {
        let state = self.state.fetch_sub(WRITE_LOCKED, Ordering::Release) - WRITE_LOCKED;
        assert_eq!(state, UNLOCKED);
    }
}
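
// A small sanity-check sketch of the state encoding above, added for illustration.
// Whether `#[cfg(test)]` tests can actually be built and run in this kernel's
// environment is an assumption; treat this as documentation of the invariants.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn encoding_invariants() {
        // The write-locked value is the all-ones reader field, and the reader
        // count is capped below it, so the two can never be confused.
        assert_eq!(WRITE_LOCKED, MASK);
        assert!(MAX_READERS < WRITE_LOCKED);

        // Unlocked and partially read-locked states accept another reader.
        assert!(is_read_lockable(UNLOCKED));
        assert!(is_read_lockable(READ_LOCKED));
        assert!(is_read_lockable(MAX_READERS - 1));

        // A saturated reader count or a write lock rejects new readers.
        assert!(!is_read_lockable(MAX_READERS));
        assert!(!is_read_lockable(WRITE_LOCKED));
    }
}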