1use std::{
2 collections::HashMap,
3 mem::MaybeUninit,
4 ptr::NonNull,
5 sync::{Arc, OnceLock},
6};
7
8use dynlink::{compartment::Compartment, tls::TlsRegion};
9use monitor_api::{RuntimeThreadControl, MONITOR_INSTANCE_ID};
10use twizzler_abi::{
11 object::NULLPAGE_SIZE,
12 syscall::{sys_spawn, sys_thread_exit, ThreadSyncSleep, UpcallTargetSpawnOption},
13 thread::{ExecutionState, ThreadRepr},
14 upcall::{UpcallFlags, UpcallInfo, UpcallMode, UpcallOptions, UpcallTarget},
15};
16use twizzler_rt_abi::{
17 error::{GenericError, TwzError},
18 object::{MapFlags, ObjID},
19};
20
21use super::{
22 get_monitor,
23 space::{MapHandle, MapInfo},
24};
25use crate::{gates::ThreadMgrStats, mon::space::Space};
26
27mod cleaner;
28pub(crate) use cleaner::ThreadCleaner;
29
30pub const SUPER_UPCALL_STACK_SIZE: usize = 8 * 1024 * 1024; pub const DEFAULT_STACK_SIZE: usize = 8 * 1024 * 1024; pub const STACK_SIZE_MIN_ALIGN: usize = 0x1000; pub struct ThreadMgr {
41 all: HashMap<ObjID, ManagedThread>,
42 cleaner: OnceLock<cleaner::ThreadCleaner>,
43 next_id: u32,
44 id_stack: Vec<u32>,
45}
46
47impl Default for ThreadMgr {
48 fn default() -> Self {
49 Self {
50 all: HashMap::default(),
51 cleaner: OnceLock::new(),
52 next_id: 1,
53 id_stack: Vec::new(),
54 }
55 }
56}
57
/// RAII guard for an allocated supervisor tid: on drop, the id is pushed
/// back onto the manager's free stack. Call `freeze` to keep the id
/// allocated (e.g. once a thread actually owns it).
struct IdDropper<'a> {
    // Mutable borrow of the manager so Drop can release the id.
    mgr: &'a mut ThreadMgr,
    // The allocated supervisor tid.
    id: u32,
}
62
63impl<'a> IdDropper<'a> {
64 pub fn freeze(self) -> u32 {
65 let id = self.id;
66 std::mem::forget(self);
67 id
68 }
69}
70
impl<'a> Drop for IdDropper<'a> {
    // If the guard is dropped without `freeze`, the tid goes back onto the
    // free stack for reuse.
    fn drop(&mut self) {
        self.mgr.release_super_tid(self.id);
    }
}
76
77impl ThreadMgr {
78 pub(super) fn set_cleaner(&mut self, cleaner: cleaner::ThreadCleaner) {
79 self.cleaner.set(cleaner).ok().unwrap();
80 }
81
82 fn next_super_tid(&mut self) -> IdDropper<'_> {
83 let id = self.id_stack.pop().unwrap_or_else(|| {
84 let id = self.next_id;
85 self.next_id += 1;
86 id
87 });
88 IdDropper { mgr: self, id }
89 }
90
91 fn release_super_tid(&mut self, id: u32) {
92 self.id_stack.push(id);
93 }
94
95 fn do_remove(&mut self, thread: &ManagedThread) {
96 self.all.remove(&thread.id);
97 self.release_super_tid(thread.super_tid);
98 if let Some(cleaner) = self.cleaner.get() {
99 cleaner.untrack(thread.id);
100 }
101 }
102
103 pub fn stat(&self) -> ThreadMgrStats {
104 ThreadMgrStats {
105 nr_threads: self.all.len(),
106 }
107 }
108
109 unsafe fn spawn_thread(
110 start: usize,
111 super_stack_start: usize,
112 super_thread_pointer: usize,
113 arg: usize,
114 ) -> Result<ObjID, TwzError> {
115 let upcall_target = UpcallTarget::new(
116 None,
117 Some(twizzler_rt_abi::arch::__twz_rt_upcall_entry),
118 super_stack_start,
119 SUPER_UPCALL_STACK_SIZE,
120 super_thread_pointer,
121 MONITOR_INSTANCE_ID,
122 [UpcallOptions {
123 flags: UpcallFlags::empty(),
124 mode: UpcallMode::CallSuper,
125 }; UpcallInfo::NR_UPCALLS],
126 );
127
128 sys_spawn(twizzler_abi::syscall::ThreadSpawnArgs {
129 entry: start,
130 stack_base: super_stack_start,
131 stack_size: SUPER_UPCALL_STACK_SIZE,
132 tls: super_thread_pointer,
133 arg,
134 flags: twizzler_abi::syscall::ThreadSpawnFlags::empty(),
135 vm_context_handle: None,
136 upcall_target: UpcallTargetSpawnOption::SetTo(upcall_target),
137 })
138 }
139
140 fn do_spawn(
141 &mut self,
142 monitor_dynlink_comp: &mut Compartment,
143 start: unsafe extern "C" fn(usize) -> !,
144 arg: usize,
145 main_thread_comp: Option<ObjID>,
146 ) -> Result<ManagedThread, TwzError> {
147 let super_tls = monitor_dynlink_comp
148 .build_tls_region(RuntimeThreadControl::default(), |layout| unsafe {
149 NonNull::new(std::alloc::alloc_zeroed(layout))
150 })
151 .map_err(|_| GenericError::Internal)?;
152 let super_tid = self.next_super_tid().freeze();
153 unsafe {
154 let tcb = super_tls.get_thread_control_block::<RuntimeThreadControl>();
155 (*tcb).runtime_data.set_id(super_tid);
156 }
157 let super_thread_pointer = super_tls.get_thread_pointer_value();
158 let super_stack = Box::new_zeroed_slice(SUPER_UPCALL_STACK_SIZE);
159 let id = unsafe {
160 Self::spawn_thread(
161 start as *const () as usize,
162 super_stack.as_ptr() as usize,
163 super_thread_pointer,
164 arg,
165 )?
166 };
167 let repr = Space::map(
168 &get_monitor().space,
169 MapInfo {
170 id,
171 flags: MapFlags::READ,
172 },
173 )
174 .unwrap();
175 Ok(Arc::new(ManagedThreadInner {
176 id,
177 super_tid,
178 repr: ManagedThreadRepr::new(repr),
179 _super_stack: super_stack,
180 _super_tls: super_tls,
181 main_thread_comp,
182 }))
183 }
184
185 pub fn start_thread(
188 &mut self,
189 monitor_dynlink_comp: &mut Compartment,
190 main: Box<dyn FnOnce()>,
191 main_thread_comp: Option<ObjID>,
192 ) -> Result<ManagedThread, TwzError> {
193 let main_addr = Box::into_raw(Box::new(main)) as usize;
194 unsafe extern "C" fn managed_thread_entry(main: usize) -> ! {
195 {
196 let main = Box::from_raw(main as *mut Box<dyn FnOnce()>);
197 main();
198 }
199
200 sys_thread_exit(0);
201 }
202
203 let mt = self.do_spawn(
204 monitor_dynlink_comp,
205 managed_thread_entry,
206 main_addr,
207 main_thread_comp,
208 );
209 if let Ok(ref mt) = mt {
210 if let Some(cleaner) = self.cleaner.get() {
211 cleaner.track(mt.clone());
212 }
213 }
214 mt
215 }
216}
217
/// Per-thread state owned by the monitor for a managed thread.
pub struct ManagedThreadInner {
    /// The thread's object id.
    pub id: ObjID,
    /// The supervisor tid assigned by `ThreadMgr`.
    pub super_tid: u32,
    /// Read-only mapping of the thread's repr object (mapped with
    /// `MapFlags::READ` in `do_spawn`).
    pub(crate) repr: ManagedThreadRepr,
    /// Held only to keep the supervisor stack allocation alive.
    _super_stack: Box<[MaybeUninit<u8>]>,
    /// Held only to keep the supervisor TLS region alive.
    _super_tls: TlsRegion,
    /// NOTE(review): presumably the compartment for which this thread is
    /// the main thread, when set — confirm against callers.
    pub main_thread_comp: Option<ObjID>,
}
229
230impl ManagedThreadInner {
231 pub fn has_exited(&self) -> bool {
233 self.repr.get_repr().get_state() == ExecutionState::Exited
234 }
235
236 pub fn waitable_until_exit(&self) -> ThreadSyncSleep {
238 self.repr.get_repr().waitable(ExecutionState::Exited)
239 }
240}
241
// SAFETY: required because some owned fields (TlsRegion, MapHandle) do not
// derive Send/Sync automatically. NOTE(review): this asserts those resources
// are safe to move and share across threads once the thread is spawned —
// confirm against the dynlink TlsRegion and space MapHandle definitions.
unsafe impl Send for ManagedThreadInner {}
unsafe impl Sync for ManagedThreadInner {}
245
246impl core::fmt::Debug for ManagedThreadInner {
247 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
248 write!(f, "ManagedThread({})", self.id)
249 }
250}
251
impl Drop for ManagedThreadInner {
    // Trace-only hook; actual cleanup (unmapping, tid recycling) happens in
    // ThreadMgr::do_remove and the owned fields' own Drop impls.
    fn drop(&mut self) {
        tracing::trace!("dropping ManagedThread {}", self.id);
    }
}
257
/// A reference-counted handle to a monitor-managed thread.
pub type ManagedThread = Arc<ManagedThreadInner>;
260
/// Wraps the mapping of a thread's repr object so its state can be read.
pub(crate) struct ManagedThreadRepr {
    // Keeps the repr object mapped for as long as this value lives.
    handle: MapHandle,
}
265
266impl ManagedThreadRepr {
267 fn new(handle: MapHandle) -> Self {
268 Self { handle }
269 }
270
271 pub fn get_repr(&self) -> &ThreadRepr {
273 let addr = self.handle.addrs().start + NULLPAGE_SIZE;
274 unsafe { (addr as *const ThreadRepr).as_ref().unwrap() }
275 }
276}