kernel/sync/spin/remutex.rs

use core::{
    cell::Cell,
    fmt,
    marker::PhantomData,
    ops::Deref,
    sync::atomic::{AtomicI64, Ordering},
};

use crate::cpu;

use super::lock;

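/// A recursive (re-entrant) spin mutex: the CPU that already holds the lock
/// may lock it again without deadlocking; the underlying lock is released
/// only when the outermost guard is dropped. Guards hand out shared (`&T`)
/// access only, since several guards can be live at once on the owning CPU.
///
/// A minimal usage sketch (assumes a kernel context where `cpu::cpu()` is
/// set up; `COUNTER` is a hypothetical example, not part of this module):
///
/// ```ignore
/// static COUNTER: ReMutex<Cell<u32>> = ReMutex::new(Cell::new(0));
///
/// let outer = COUNTER.lock();
/// let inner = COUNTER.lock(); // same CPU: re-entrant, no deadlock
/// inner.set(inner.get() + 1);
/// drop(inner); // count 2 -> 1, lock still held
/// drop(outer); // count 1 -> 0, lock released
/// ```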
pub struct ReMutex<T> {
    lock: lock::Lock,
    /// ID of the CPU currently holding the lock, or `-1` when unowned.
    owner_cpu: AtomicI64,
    /// Depth of re-entrant acquisitions by the owning CPU.
    lock_count: Cell<usize>,
    data: T,
}

// SAFETY: the data is only reachable through a guard, and the guard is
// `!Send` (see `ReMutexGuard::marker`), so each critical section stays on
// the CPU that took the lock; `T: Send` therefore suffices for both impls.
unsafe impl<T: Send> Send for ReMutex<T> {}
unsafe impl<T: Send> Sync for ReMutex<T> {}

impl<T> fmt::Debug for ReMutex<T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut s = f.debug_struct("ReMutex");
        s.field("owner_cpu", &self.owner_cpu)
            .field("lock_count", &self.lock_count);
        if let Some(data) = self.try_lock() {
            s.field("data", &data);
        } else {
            s.field("data", &"[locked]");
        }
        s.finish()
    }
}

#[must_use]
pub struct ReMutexGuard<'a, T: 'a> {
    lock: &'a ReMutex<T>,
    // `PhantomData<*const ()>` makes the guard `!Send`: it must be dropped
    // on the CPU that acquired it, since `drop` calls `pop_cli` there.
    marker: PhantomData<*const ()>,
}

impl<T> fmt::Debug for ReMutexGuard<'_, T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.deref().fmt(f)
    }
}

impl<T> ReMutex<T> {
    pub const fn new(data: T) -> Self {
        Self {
            lock: lock::Lock::new(),
            owner_cpu: AtomicI64::new(-1),
            lock_count: Cell::new(0),
            data,
        }
    }

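    /// Acquires the lock, spinning if another CPU holds it. If the calling
    /// CPU already owns the lock, only the recursion count is bumped and
    /// the call returns immediately.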
    pub fn lock(&self) -> ReMutexGuard<'_, T> {
        let cpu = cpu::cpu();
        // Disable interrupts before touching the lock state; the matching
        // `pop_cli` runs when the guard is dropped.
        cpu.push_cli();
        let cpu_id = cpu.id as i64;

        if self.owner_cpu.load(Ordering::Relaxed) == cpu_id {
            // Re-entrant path: this CPU already owns the lock.
            assert!(self.lock_count.get() > 0);
            assert!(cpu.n_cli() > 0 && cpu.interrupts_disabled());
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    .expect("ReMutex lock count overflow"),
            );
            ReMutexGuard {
                lock: self,
                marker: PhantomData,
            }
        } else {
            // Fresh acquisition: spin until the underlying lock is free.
            self.lock.write_lock();
            self.owner_cpu.store(cpu_id, Ordering::Relaxed);
            self.lock_count.set(1);
            ReMutexGuard {
                lock: self,
                marker: PhantomData,
            }
        }
    }

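    /// Like `lock`, but returns `None` instead of spinning when another CPU
    /// holds the lock. Re-entrant acquisition by the owning CPU always
    /// succeeds.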
    pub fn try_lock(&self) -> Option<ReMutexGuard<'_, T>> {
        let cpu = cpu::cpu();
        cpu.push_cli();
        let cpu_id = cpu.id as i64;

        if self.owner_cpu.load(Ordering::Relaxed) == cpu_id {
            assert!(self.lock_count.get() > 0);
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    .expect("ReMutex lock count overflow"),
            );
            Some(ReMutexGuard {
                lock: self,
                marker: PhantomData,
            })
        } else if self.lock.try_write_lock() {
            self.owner_cpu.store(cpu_id, Ordering::Relaxed);
            self.lock_count.set(1);
            Some(ReMutexGuard {
                lock: self,
                marker: PhantomData,
            })
        } else {
            // Failed to take the lock: balance the `push_cli` above, since
            // no guard will be dropped to do it for us.
            cpu.pop_cli();
            None
        }
    }

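    /// Returns a mutable reference to the data without locking: `&mut self`
    /// already proves exclusive access.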
    #[allow(dead_code)]
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.data
    }
}

impl<T> core::ops::Deref for ReMutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.lock.data
    }
}

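// Dropping a guard releases one level of recursion; the underlying lock is
// handed back only when the count reaches zero.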
impl<T> Drop for ReMutexGuard<'_, T> {
    fn drop(&mut self) {
        self.lock.lock_count.set(
            self.lock
                .lock_count
                .get()
                .checked_sub(1)
                .expect("ReMutex lock count underflow"),
        );
        if self.lock.lock_count.get() == 0 {
            self.lock.owner_cpu.store(-1, Ordering::Relaxed);
            // SAFETY: this CPU holds the lock (the count just hit zero), so
            // it is entitled to release it.
            unsafe { self.lock.lock.write_unlock() };
        }
        cpu::cpu().pop_cli();
    }
}