// kernel/sync/spin/mutex.rs

use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    ops::{Deref, DerefMut},
    sync::atomic::{AtomicI64, Ordering},
};

use crate::cpu;

use super::lock;
12
/// A spin-based mutual-exclusion primitive protecting a value of type `T`.
///
/// Locking also disables interrupts on the current CPU (via
/// `cpu::push_cli`/`pop_cli` in `lock`/`try_lock`/guard drop), and the
/// owning CPU id is tracked so that recursive locking by the same CPU can
/// be detected.
pub struct Mutex<T: ?Sized> {
    // Underlying lock from `super::lock` providing the actual exclusion.
    lock: lock::Lock,
    // Id of the CPU currently holding the lock, or -1 when unlocked.
    // Read/written with `Ordering::Relaxed`; it is only compared against
    // the *current* CPU's id to detect re-entrant locking.
    owner_cpu: AtomicI64,
    // The protected data; `UnsafeCell` because references to it are handed
    // out through a shared `&self` while the lock is held.
    data: UnsafeCell<T>,
}
18
// SAFETY: the lock serializes all access to the inner `T`, so the mutex may
// be sent to or shared between CPUs whenever `T` itself is `Send`. (`T: Sync`
// is not required for `Sync` here because the data is only ever reached
// through the lock.)
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
21
22impl<T> fmt::Debug for Mutex<T>
23where
24 T: fmt::Debug,
25{
26 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
27 let mut s = f.debug_struct("Mutex");
28 s.field("owner_cpu", &self.owner_cpu);
29 if let Some(data) = self.try_lock() {
30 s.field("data", &data);
31 } else {
32 s.field("data", &"[locked]");
33 }
34 s.finish()
35 }
36}
37
/// RAII guard returned by [`Mutex::lock`]/[`Mutex::try_lock`]; grants access
/// to the protected data and releases the lock when dropped.
#[must_use]
pub struct MutexGuard<'a, T: ?Sized + 'a> {
    // Back-reference to the mutex, used to reach the data and to unlock
    // and clear `owner_cpu` on drop.
    lock: &'a Mutex<T>,
    // `*const ()` is neither `Send` nor `Sync`, so this marker makes the
    // guard `!Send`: it must be dropped on the CPU that created it, since
    // its `Drop` calls `cpu::cpu().pop_cli()` to balance the `push_cli`
    // done at lock time. `Sync` is separately re-asserted for the guard
    // elsewhere in this file.
    marker: PhantomData<*const ()>,
}
43
// SAFETY: a shared `&MutexGuard` only exposes `&T` (through `Deref`), which
// is safe to share across threads when `T: Sync`. The guard stays `!Send`
// via its `PhantomData<*const ()>` field.
unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
45
46impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
47 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
48 (**self).fmt(f)
49 }
50}
51
52impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
53 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
54 (**self).fmt(f)
55 }
56}
57
58impl<T> Mutex<T> {
59 pub const fn new(data: T) -> Self {
60 Self {
61 lock: lock::Lock::new(),
62 owner_cpu: AtomicI64::new(-1),
63 data: UnsafeCell::new(data),
64 }
65 }
66}
67
68impl<T: ?Sized> Mutex<T> {
69 pub fn lock(&self) -> MutexGuard<'_, T> {
70 let cpu = cpu::cpu();
71 cpu.push_cli(); let cpu_id = cpu.id as i64;
73
74 if self.owner_cpu.load(Ordering::Relaxed) == cpu_id {
75 panic!("Mutex already locked by this CPU");
76 } else {
77 self.lock.write_lock();
78 self.owner_cpu.store(cpu_id, Ordering::Relaxed);
79 MutexGuard {
80 lock: self,
81 marker: PhantomData,
82 }
83 }
84 }
85
86 pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
87 let cpu = cpu::cpu();
88 cpu.push_cli(); let cpu_id = cpu.id as i64;
90
91 if self.owner_cpu.load(Ordering::Relaxed) == cpu_id {
92 cpu.pop_cli();
94 None
95 } else if self.lock.try_write_lock() {
96 self.owner_cpu.store(cpu_id, Ordering::Relaxed);
97 Some(MutexGuard {
98 lock: self,
99 marker: PhantomData,
100 })
101 } else {
102 cpu.pop_cli();
103 None
104 }
105 }
106
107 #[allow(dead_code)]
113 pub fn run_with<'a, R>(&'a self, f: impl FnOnce(&'a T) -> R) -> R {
114 let guard: MutexGuard<'a, T> = self.lock();
115 let d = unsafe { guard.lock.data.get().as_ref().unwrap() };
116 f(d)
117 }
118
119 #[allow(dead_code)]
125 pub fn run_with_mut<'a, R>(&'a self, f: impl FnOnce(&'a mut T) -> R) -> R {
126 let guard: MutexGuard<'a, T> = self.lock();
127 let d = unsafe { guard.lock.data.get().as_mut().unwrap() };
128 f(d)
129 }
130
131 #[allow(dead_code)]
134 pub fn get_mut(&mut self) -> &mut T {
135 self.data.get_mut()
136 }
137}
138
139impl<T: ?Sized> Deref for MutexGuard<'_, T> {
140 type Target = T;
141
142 fn deref(&self) -> &Self::Target {
143 unsafe { self.lock.data.get().as_ref().unwrap() }
146 }
147}
148
149impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
150 fn deref_mut(&mut self) -> &mut Self::Target {
151 unsafe { self.lock.data.get().as_mut().unwrap() }
154 }
155}
156
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
    /// Releases the mutex. The order of operations matters:
    /// 1. clear `owner_cpu` *before* unlocking, so another CPU that wins
    ///    the lock immediately afterwards never races with a stale owner
    ///    id still being ours;
    /// 2. release the underlying write lock;
    /// 3. pop the interrupt state last, balancing the `push_cli` made in
    ///    `lock()`/`try_lock()` (NOTE(review): presumably re-enables
    ///    interrupts once the CPU's cli nesting reaches zero — see the
    ///    `cpu` module).
    fn drop(&mut self) {
        self.lock.owner_cpu.store(-1, Ordering::Relaxed);
        // SAFETY: this guard holds the write lock, so unlocking it here
        // upholds `write_unlock`'s contract. — TODO confirm against
        // `super::lock`'s documented requirements.
        unsafe { self.lock.lock.write_unlock() };
        cpu::cpu().pop_cli();
    }
}