twizzler_abi/simple_mutex.rs

use core::{
    cell::UnsafeCell,
    sync::atomic::{AtomicU64, Ordering},
};

use crate::syscall::{
    sys_thread_sync, ThreadSync, ThreadSyncFlags, ThreadSyncOp, ThreadSyncReference,
    ThreadSyncSleep, ThreadSyncWake,
};
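
/// A futex-style mutex built directly on the kernel's thread-sync syscalls.
/// The `lock` word encodes three states: 0 = unlocked, 1 = locked with no
/// sleeping waiters, and 2 = locked with possible sleeping waiters. The
/// `caller` cell records the source location of the most recent acquirer,
/// for diagnostic purposes.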
pub struct MutexImp {
    lock: AtomicU64,
    caller: UnsafeCell<&'static core::panic::Location<'static>>,
}

// Safety: the `caller` cell is only ever written by the thread that has just
// acquired the lock, so sharing `MutexImp` across threads is sound.
unsafe impl Send for MutexImp {}
unsafe impl Sync for MutexImp {}

impl MutexImp {
    /// Creates a new, unlocked mutex.
    #[allow(dead_code)]
    pub const fn new() -> MutexImp {
        MutexImp {
            lock: AtomicU64::new(0),
            caller: UnsafeCell::new(core::panic::Location::caller()),
        }
    }

    /// Returns true if the lock word is nonzero (held, contended or not).
    #[allow(dead_code)]
    pub fn is_locked(&self) -> bool {
        self.lock.load(Ordering::SeqCst) != 0
    }

    /// Acquires the lock, spinning briefly before sleeping on the lock word.
    ///
    /// # Safety
    /// Each call must be paired with exactly one later call to [`Self::unlock`].
    #[inline]
    #[allow(dead_code)]
    #[track_caller]
    pub unsafe fn lock(&self, caller: &'static core::panic::Location<'static>) {
        // Fast path: spin for a bounded number of iterations, trying to move
        // the lock word from 0 (unlocked) to 1 (locked, uncontended). The weak
        // compare-exchange may fail spuriously, which only costs a retry.
        for _ in 0..100 {
            let result = self
                .lock
                .compare_exchange_weak(0, 1, Ordering::SeqCst, Ordering::SeqCst);
            if result.is_ok() {
                unsafe { self.caller.get().write(caller) };
                return;
            }
            core::hint::spin_loop();
        }
        // Slow path: mark the lock contended (1 -> 2) so the holder knows to
        // issue a wake on unlock, then prepare a sleep operation that blocks
        // while the lock word still reads 2.
        let _ = self
            .lock
            .compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst);
        let sleep = ThreadSync::new_sleep(ThreadSyncSleep::new(
            ThreadSyncReference::Virtual(&self.lock),
            2,
            ThreadSyncOp::Equal,
            ThreadSyncFlags::empty(),
        ));
        loop {
            // Swap in the contended state. If the previous value was 0 the
            // lock was free and is now ours (conservatively marked contended);
            // otherwise sleep until an unlocking thread wakes us.
            let state = self.lock.swap(2, Ordering::SeqCst);
            if state == 0 {
                break;
            }
            let _ = sys_thread_sync(&mut [sleep], None);
        }
        unsafe { self.caller.get().write(caller) };
    }

    /// Releases the lock, waking one sleeper if the lock was contended.
    ///
    /// # Safety
    /// Must only be called by the thread currently holding the lock.
    #[inline]
    #[allow(dead_code)]
    pub unsafe fn unlock(&self) {
        // A previous value of 1 means no thread ever marked the lock
        // contended, so nobody is sleeping and no wake-up is needed.
        if self.lock.swap(0, Ordering::SeqCst) == 1 {
            return;
        }
        // The lock was contended. Spin briefly hoping another thread acquires
        // it; if one does (lock word > 0) and we can ensure it stays marked
        // contended (the CAS does not observe 0), that thread's unlock takes
        // over the wake-up and we can skip the syscall.
        for _ in 0..200 {
            if self.lock.load(Ordering::SeqCst) > 0
                && self
                    .lock
                    .compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst)
                    != Err(0)
            {
                return;
            }
            core::hint::spin_loop();
        }
        // Nobody picked the lock up while we spun; wake one sleeping waiter.
        let wake = ThreadSync::new_wake(ThreadSyncWake::new(
            ThreadSyncReference::Virtual(&self.lock),
            1,
        ));
        let _ = sys_thread_sync(&mut [wake], None);
    }

    /// Attempts to acquire the lock without blocking, returning true on
    /// success. The weak compare-exchange may fail spuriously even when the
    /// lock is free, which is acceptable for try-lock semantics.
    ///
    /// # Safety
    /// A successful call must be paired with a later call to [`Self::unlock`].
    #[inline]
    #[allow(dead_code)]
    pub unsafe fn try_lock(&self) -> bool {
        self.lock
            .compare_exchange_weak(0, 1, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
    }
}

/// A data-owning mutex in the style of `std::sync::Mutex`, built on
/// [`MutexImp`].
pub struct Mutex<T> {
    imp: MutexImp,
    data: UnsafeCell<T>,
}

impl<T: Default> Default for Mutex<T> {
    fn default() -> Self {
        Self {
            imp: MutexImp::new(),
            data: Default::default(),
        }
    }
}

impl<T> Mutex<T> {
    #[allow(dead_code)]
    pub const fn new(data: T) -> Self {
        Self {
            imp: MutexImp::new(),
            data: UnsafeCell::new(data),
        }
    }

    /// Acquires the lock, returning an RAII guard that releases it on drop.
    #[allow(dead_code)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        unsafe {
            self.imp.lock(core::panic::Location::caller());
        }
        LockGuard { lock: self }
    }

    /// Attempts to acquire the lock, returning `None` if it is already held
    /// (or if the underlying weak compare-exchange fails spuriously).
    #[allow(dead_code)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        unsafe {
            if !self.imp.try_lock() {
                return None;
            }
        }
        Some(LockGuard { lock: self })
    }
}

// Safety: the mutex serializes access to `data`, so `Mutex<T>` and its guard
// are `Send` whenever `T: Send`; sharing a guard additionally needs `T: Sync`.
unsafe impl<T> Send for Mutex<T> where T: Send {}
unsafe impl<T> Sync for Mutex<T> where T: Send {}
unsafe impl<T> Send for LockGuard<'_, T> where T: Send {}
unsafe impl<T> Sync for LockGuard<'_, T> where T: Send + Sync {}

/// An RAII guard that dereferences to the protected data and unlocks on drop.
pub struct LockGuard<'a, T> {
    lock: &'a Mutex<T>,
}

impl<T> core::ops::Deref for LockGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.lock.data.get() }
    }
}

impl<T> core::ops::DerefMut for LockGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T> Drop for LockGuard<'_, T> {
    fn drop(&mut self) {
        unsafe { self.lock.imp.unlock() };
    }
}
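
// A minimal usage sketch (not part of the original file; assumes a Twizzler
// userspace where the thread-sync syscalls above are available). `Mutex::new`
// is const, so a static works, and the guard unlocks when it leaves scope:
//
//     static COUNTER: Mutex<u64> = Mutex::new(0);
//
//     fn increment() -> u64 {
//         let mut guard = COUNTER.lock(); // spins, then sleeps if contended
//         *guard += 1;
//         *guard
//     } // guard dropped here: unlock() runs, waking a sleeper if needed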