// twizzler_abi/simple_mutex.rs

//! Very simple and unsafe Mutex for internal locking needs. DO NOT USE, USE THE RUST STANDARD
//! LIBRARY MUTEX INSTEAD.

use core::{
    cell::UnsafeCell,
    sync::atomic::{AtomicU64, Ordering},
};

use crate::syscall::{
    sys_thread_sync, ThreadSync, ThreadSyncFlags, ThreadSyncOp, ThreadSyncReference,
    ThreadSyncSleep, ThreadSyncWake,
};

/// Simple mutex, supporting sleeping and wakeup. Makes no attempt at handling priority or fairness.
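///
/// The lock word holds one of three states: 0 (unlocked), 1 (locked with no waiters), or
/// 2 (locked with possible waiters sleeping in the kernel).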
pub(crate) struct MutexImp {
    lock: AtomicU64,
}

unsafe impl Send for MutexImp {}

impl MutexImp {
    /// Construct a new mutex.
    #[allow(dead_code)]
    pub const fn new() -> MutexImp {
        MutexImp {
            lock: AtomicU64::new(0),
        }
    }

    #[allow(dead_code)]
    pub fn is_locked(&self) -> bool {
        self.lock.load(Ordering::SeqCst) != 0
    }

    /// Lock a mutex, which can be unlocked by calling [MutexImp::unlock].
    /// # Safety
    /// The caller must ensure that they are not recursively locking, that they unlock the
    /// mutex correctly, and that any data protected by the mutex is only accessed with the mutex
    /// locked.
    ///
    /// Note, this is why you should use the standard library mutex, which enforces all of these
    /// things.
    #[inline]
    #[allow(dead_code)]
    pub unsafe fn lock(&self) {
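        // Fast path: spin for a bounded number of iterations, trying to take the
        // uncontended lock (0 -> 1).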
        for _ in 0..100 {
            let result = self
                .lock
                .compare_exchange_weak(0, 1, Ordering::SeqCst, Ordering::SeqCst);
            if result.is_ok() {
                return;
            }
            core::hint::spin_loop();
        }
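        // Still contended: advertise that there may be waiters by moving the lock
        // from 1 (held) to 2 (held, contended).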
        let _ = self
            .lock
            .compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst);
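        // Describe the sleep operation: block in the kernel while the lock word still reads 2.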
        let sleep = ThreadSync::new_sleep(ThreadSyncSleep::new(
            ThreadSyncReference::Virtual(&self.lock),
            2,
            ThreadSyncOp::Equal,
            ThreadSyncFlags::empty(),
        ));
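        // Keep claiming the lock in the contended state. If the previous value was 0
        // the lock is now ours; otherwise sleep until it is released and try again.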
        loop {
            let state = self.lock.swap(2, Ordering::SeqCst);
            if state == 0 {
                break;
            }
            let _ = sys_thread_sync(&mut [sleep], None);
        }
    }

    /// Unlock a mutex locked with [MutexImp::lock].
    /// # Safety
    /// The caller must be the current owner of the locked mutex and must make sure to unlock it
    /// properly.
    #[inline]
    #[allow(dead_code)]
    pub unsafe fn unlock(&self) {
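        // Release the lock. If the previous value was 1 there were no waiters, so no
        // wakeup is needed.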
        if self.lock.swap(0, Ordering::SeqCst) == 1 {
            return;
        }
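        // The lock was contended. Spin briefly: if another thread re-acquires the lock in the
        // meantime (and we can mark it contended again, or it already is), that thread will
        // perform the wakeup on its own unlock, so we can skip the syscall.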
        for _ in 0..200 {
            if self.lock.load(Ordering::SeqCst) > 0
                && self
                    .lock
                    .compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst)
                    != Err(0)
            {
                return;
            }
            core::hint::spin_loop();
        }
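        // Nobody took over the lock; wake up at most one thread sleeping on the lock word.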
        let wake = ThreadSync::new_wake(ThreadSyncWake::new(
            ThreadSyncReference::Virtual(&self.lock),
            1,
        ));
        let _ = sys_thread_sync(&mut [wake], None);
    }

    /// Similar to [MutexImp::lock], but if the lock cannot be taken immediately, returns false
    /// instead of blocking. Returns true if the lock was acquired.
    /// # Safety
    /// Same safety concerns as [MutexImp::lock], except the caller must also check whether the
    /// lock was actually acquired.
    #[inline]
    #[allow(dead_code)]
    pub unsafe fn try_lock(&self) -> bool {
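        // A single attempt at taking the uncontended lock. compare_exchange_weak may fail
        // spuriously, which is acceptable for a try-lock.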
        self.lock
            .compare_exchange_weak(0, 1, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
    }
}

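/// A mutex protecting the data it wraps, built on [MutexImp]. Locking returns a [LockGuard]
/// that releases the lock when dropped.
///
/// A minimal usage sketch (illustrative only; this type exists for internal locking needs, so
/// prefer the standard library mutex everywhere else):
/// ```ignore
/// let counter = Mutex::new(0u32);
/// {
///     let mut guard = counter.lock();
///     *guard += 1;
/// } // the guard is dropped here, unlocking the mutex
/// assert_eq!(*counter.lock(), 1);
/// ```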
pub struct Mutex<T> {
    imp: MutexImp,
    data: UnsafeCell<T>,
}

impl<T: Default> Default for Mutex<T> {
    fn default() -> Self {
        Self {
            imp: MutexImp::new(),
            data: Default::default(),
        }
    }
}

impl<T> Mutex<T> {
    #[allow(dead_code)]
    pub const fn new(data: T) -> Self {
        Self {
            imp: MutexImp::new(),
            data: UnsafeCell::new(data),
        }
    }

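    /// Lock the mutex, blocking until it is acquired, and return a guard that grants access to
    /// the data and unlocks the mutex when dropped.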
    #[allow(dead_code)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        unsafe {
            self.imp.lock();
        }
        LockGuard { lock: self }
    }

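    /// Try to lock the mutex without blocking, returning a guard on success and None if the
    /// lock could not be taken immediately.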
    #[allow(dead_code)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        unsafe {
            if !self.imp.try_lock() {
                return None;
            }
        }
        Some(LockGuard { lock: self })
    }
}

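// Safety: the mutex hands out access to the inner data only while the lock is held, so sharing
// or sending it across threads requires only that the data itself is Send.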
unsafe impl<T> Send for Mutex<T> where T: Send {}
unsafe impl<T> Sync for Mutex<T> where T: Send {}
unsafe impl<T> Send for LockGuard<'_, T> where T: Send {}
unsafe impl<T> Sync for LockGuard<'_, T> where T: Send + Sync {}

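/// Guard granting access to the data protected by a [Mutex]. The lock is released when the
/// guard is dropped.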
pub struct LockGuard<'a, T> {
    lock: &'a Mutex<T>,
}

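// While a guard exists the lock is held, so handing out references to the protected data is sound.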
impl<T> core::ops::Deref for LockGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        unsafe { &*self.lock.data.get() }
    }
}

impl<T> core::ops::DerefMut for LockGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.lock.data.get() }
    }
}

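// Dropping the guard unlocks the mutex.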
impl<T> Drop for LockGuard<'_, T> {
    fn drop(&mut self) {
        unsafe { self.lock.imp.unlock() };
    }
}