use std::{
    alloc::Layout,
    collections::HashMap,
    ffi::{CStr, CString},
    ptr::NonNull,
    sync::atomic::{AtomicU64, Ordering},
};

use dynlink::{compartment::CompartmentId, context::Context};
use monitor_api::{CompartmentFlags, RuntimeThreadControl, SharedCompConfig, TlsTemplateInfo};
use secgate::util::SimpleBuffer;
use talc::{ErrOnOom, Talc};
use twizzler_abi::{
    syscall::{
        DeleteFlags, ObjectControlCmd, ThreadSync, ThreadSyncFlags, ThreadSyncOp,
        ThreadSyncReference, ThreadSyncSleep, ThreadSyncWake,
    },
    upcall::{ResumeFlags, UpcallData, UpcallFrame},
};
use twizzler_rt_abi::{
    core::{CompartmentInitInfo, CtorSet, InitInfoPtrs, RuntimeInfo, RUNTIME_INIT_COMP},
    error::TwzError,
    object::{MapFlags, ObjID},
};

use super::{compconfig::CompConfigObject, compthread::CompThread, StackObject};
use crate::{
    gates::ThreadInfo,
    mon::{
        get_monitor,
        space::{MapHandle, MapInfo, Space},
        thread::ThreadMgr,
    },
};

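// Compartment state flags. These mirror bits from monitor_api::CompartmentFlags as raw u64
// values so they can be stored in the compartment's atomic flags word and used directly with
// thread-sync wait/wake operations.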
pub const COMP_READY: u64 = CompartmentFlags::READY.bits();
pub const COMP_IS_BINARY: u64 = CompartmentFlags::IS_BINARY.bits();
pub const COMP_THREAD_CAN_EXIT: u64 = CompartmentFlags::THREAD_CAN_EXIT.bits();
pub const COMP_STARTED: u64 = CompartmentFlags::STARTED.bits();
pub const COMP_DESTRUCTED: u64 = CompartmentFlags::DESTRUCTED.bits();
pub const COMP_EXITED: u64 = CompartmentFlags::EXITED.bits();

/// A compartment that is running or has been set up to run.
pub struct RunComp {
    /// The security context ID for this compartment.
    pub sctx: ObjID,
    /// The instance ID for this compartment.
    pub instance: ObjID,
    /// The compartment's name.
    pub name: String,
    /// The dynamic linker's ID for this compartment.
    pub compartment_id: CompartmentId,
    /// The main thread, once the compartment has been started.
    main: Option<CompThread>,
    /// IDs of compartments that this compartment depends on.
    pub deps: Vec<ObjID>,
    comp_config_object: CompConfigObject,
    alloc: Talc<ErrOnOom>,
    mapped_objects: HashMap<MapInfo, MapHandle>,
    flags: Box<AtomicU64>,
    per_thread: HashMap<ObjID, PerThread>,
    init_info: Option<(StackObject, usize, Vec<CtorSet>)>,
    is_debugging: bool,
    pub(crate) use_count: u64,
}

impl Drop for RunComp {
    fn drop(&mut self) {
        // When the tracking structure goes away, delete the compartment's instance object.
        let _ = twizzler_abi::syscall::sys_object_ctrl(
            self.instance,
            ObjectControlCmd::Delete(DeleteFlags::empty()),
        )
        .inspect_err(|e| tracing::warn!("failed to delete instance on RunComp drop: {}", e));
    }
}

impl core::fmt::Debug for RunComp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RunComp")
            .field("sctx", &self.sctx)
            .field("instance", &self.instance)
            .field("name", &self.name)
            .field("deps", &self.deps)
            .field("usecount", &self.use_count)
            .field("dynlink_id", &self.compartment_id)
            .finish_non_exhaustive()
    }
}

/// Per-thread data owned by a compartment.
pub struct PerThread {
    /// A simple buffer (and its mapping) used to exchange byte data with this thread.
    simple_buffer: Option<(SimpleBuffer, MapHandle)>,
}

impl PerThread {
    fn new(instance: ObjID, _th: ObjID) -> Self {
        // Create and map an object to back this thread's simple buffer. If this fails, the
        // thread simply gets no simple buffer.
        let handle = Space::safe_create_and_map_runtime_object(
            &get_monitor().space,
            instance,
            MapFlags::READ | MapFlags::WRITE,
        )
        .ok();

        Self {
            simple_buffer: handle
                .map(|handle| (SimpleBuffer::new(unsafe { handle.object_handle() }), handle)),
        }
    }

    /// Write bytes into this thread's simple buffer, returning the number of bytes written
    /// (zero if the thread has no simple buffer).
    pub fn write_bytes(&mut self, bytes: &[u8]) -> usize {
        self.simple_buffer
            .as_mut()
            .map(|sb| sb.0.write(bytes))
            .unwrap_or(0)
    }

    /// Read up to `len` bytes from this thread's simple buffer.
    pub fn read_bytes(&mut self, len: usize) -> Vec<u8> {
        let mut v = vec![0; len];
        let readlen = self
            .simple_buffer
            .as_mut()
            .map(|sb| sb.0.read(&mut v))
            .unwrap_or(0);
        v.truncate(readlen);
        v
    }

    /// Return the ID of the object backing this thread's simple buffer, if it has one.
    pub fn simple_buffer_id(&self) -> Option<ObjID> {
        Some(self.simple_buffer.as_ref()?.0.handle().id())
    }
}

impl RunComp {
    /// Build a new compartment tracking structure. The main thread is not created until
    /// [`Self::start_main_thread`] is called.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        sctx: ObjID,
        instance: ObjID,
        name: String,
        compartment_id: CompartmentId,
        deps: Vec<ObjID>,
        comp_config_object: CompConfigObject,
        flags: u64,
        main_stack: StackObject,
        entry: usize,
        ctors: &[CtorSet],
        is_debugging: bool,
    ) -> Self {
        // Claim the config object's allocation region for the per-compartment allocator.
        let mut alloc = Talc::new(ErrOnOom);
        unsafe { alloc.claim(comp_config_object.alloc_span()).unwrap() };
        Self {
            sctx,
            is_debugging,
            instance,
            name,
            compartment_id,
            main: None,
            deps,
            comp_config_object,
            alloc,
            mapped_objects: HashMap::default(),
            flags: Box::new(AtomicU64::new(flags)),
            per_thread: HashMap::new(),
            init_info: Some((main_stack, entry, ctors.to_vec())),
            use_count: 0,
        }
    }

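    /// Get the per-thread data for thread `id` within this compartment, creating it on first
    /// use.
    ///
    /// A minimal usage sketch (illustrative only; `rc` is assumed to be a `&mut RunComp` and
    /// `thread_id` a thread's repr object ID):
    ///
    /// ```ignore
    /// let sb_id = rc.get_per_thread(thread_id).simple_buffer_id();
    /// ```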
    pub fn get_per_thread(&mut self, id: ObjID) -> &mut PerThread {
        self.per_thread
            .entry(id)
            .or_insert_with(|| PerThread::new(self.instance, id))
    }

    /// Remove the per-thread data for a thread that has exited.
    pub fn clean_per_thread_data(&mut self, id: ObjID) {
        self.per_thread.remove(&id);
    }

    /// Record that this compartment has mapped an object, keeping the handle alive until it is
    /// unmapped.
    pub fn map_object(&mut self, info: MapInfo, handle: MapHandle) -> Result<MapHandle, TwzError> {
        self.mapped_objects.insert(info, handle.clone());
        Ok(handle)
    }

    /// Remove the compartment's mapping record for an object, returning the handle if it was
    /// present.
    pub fn unmap_object(&mut self, info: MapInfo) -> Option<MapHandle> {
        let x = self.mapped_objects.remove(&info);
        if x.is_none() {
            tracing::debug!(
                "tried to comp-unmap an object that was not mapped by compartment ({}): {:?}",
                self.name,
                info
            );
        }
        x
    }

    /// Get a pointer to the compartment's shared config data.
    pub fn comp_config_ptr(&self) -> *const SharedCompConfig {
        self.comp_config_object.get_comp_config()
    }

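    /// Allocate a value of type `T` in the compartment's config-object memory and initialize it
    /// with `data`, returning a pointer into memory shared with the compartment.
    ///
    /// A minimal usage sketch (illustrative only; error handling elided and the value shown is
    /// arbitrary):
    ///
    /// ```ignore
    /// let ptr: *mut u64 = rc.monitor_new(42u64).expect("compartment allocator out of memory");
    /// ```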
    pub fn monitor_new<T: Copy + Sized>(&mut self, data: T) -> Result<*mut T, ()> {
        unsafe {
            let place: NonNull<T> = self.alloc.malloc(Layout::new::<T>())?.cast();
            place.as_ptr().write(data);
            Ok(place.as_ptr())
        }
    }

    /// Allocate a slice in the compartment's config-object memory and copy `data` into it,
    /// returning a pointer to the first element.
    pub fn monitor_new_slice<T: Copy + Sized>(&mut self, data: &[T]) -> Result<*mut T, ()> {
        unsafe {
            let place = self.alloc.malloc(Layout::array::<T>(data.len()).unwrap())?;
            let slice = core::slice::from_raw_parts_mut(place.as_ptr() as *mut T, data.len());
            slice.copy_from_slice(data);
            Ok(place.as_ptr() as *mut T)
        }
    }

    /// Set a flag on this compartment and notify any waiters.
    pub fn set_flag(&self, val: u64) {
        tracing::trace!("compartment {} set flag {:x}", self.name, val);
        self.flags.fetch_or(val, Ordering::SeqCst);
        self.notify_state_changed();
    }

    /// Atomically compare-and-swap the full flags word, notifying waiters on success.
    pub fn cas_flag(&self, old: u64, new: u64) -> Result<u64, u64> {
        let r = self
            .flags
            .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst);
        if r.is_ok() {
            tracing::trace!("compartment {} cas flag {:x} -> {:x}", self.name, old, new);
            self.notify_state_changed();
        }
        r
    }

    /// Wake up all threads waiting on this compartment's flags word.
    pub fn notify_state_changed(&self) {
        let _ = twizzler_abi::syscall::sys_thread_sync(
            &mut [ThreadSync::new_wake(ThreadSyncWake::new(
                ThreadSyncReference::Virtual(&*self.flags),
                usize::MAX,
            ))],
            None,
        );
    }

    /// Check if any of the given flag bits are set on this compartment.
    pub fn has_flag(&self, flag: u64) -> bool {
        self.flags.load(Ordering::SeqCst) & flag != 0
    }

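    /// Build a [`ThreadSyncSleep`] that waits until this compartment's flags word changes from
    /// `cur`.
    ///
    /// A minimal sketch of the intended wait pattern, assuming `rc` is a `&RunComp` and any
    /// surrounding locking is handled by the caller (illustrative only):
    ///
    /// ```ignore
    /// let mut cur = rc.raw_flags();
    /// while cur & COMP_READY == 0 {
    ///     let sleep = rc.until_change(cur);
    ///     let _ = twizzler_abi::syscall::sys_thread_sync(&mut [ThreadSync::new_sleep(sleep)], None);
    ///     cur = rc.raw_flags();
    /// }
    /// ```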
    pub fn until_change(&self, cur: u64) -> ThreadSyncSleep {
        ThreadSyncSleep::new(
            ThreadSyncReference::Virtual(&*self.flags),
            cur,
            ThreadSyncOp::Equal,
            ThreadSyncFlags::empty(),
        )
    }

    /// Read the current raw value of this compartment's flags word.
    pub fn raw_flags(&self) -> u64 {
        self.flags.load(Ordering::SeqCst)
    }

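    /// Start the compartment's main thread, transitioning the flags word from `state` to
    /// `state | COMP_STARTED`. Returns `Some(false)` if the compartment was already started,
    /// `None` if the state changed underneath us or startup failed (in which case COMP_EXITED
    /// is set), and `Some(true)` on success.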
    pub(crate) fn start_main_thread(
        &mut self,
        state: u64,
        tmgr: &mut ThreadMgr,
        dynlink: &mut Context,
        args: &[&CStr],
        env: &[&CStr],
        suspend_on_start: bool,
    ) -> Option<bool> {
        if self.has_flag(COMP_STARTED) {
            return Some(false);
        }
        let state = state & !COMP_STARTED;
        if self
            .flags
            .compare_exchange(
                state,
                state | COMP_STARTED,
                Ordering::SeqCst,
                Ordering::SeqCst,
            )
            .is_err()
        {
            return None;
        }

        tracing::debug!("starting main thread for compartment {}", self.name);
        debug_assert!(self.main.is_none());
        let (stack, entry, ctors) = self.init_info.take().unwrap();
        // Build the runtime init info in compartment-accessible memory: ctors, argv, envp, and
        // the compartment init info, all allocated via the per-compartment allocator.
        let mut build_init_info = || -> Option<_> {
            let comp_config_info =
                self.comp_config_object.get_comp_config() as *mut SharedCompConfig;
            let ctors_in_comp = self.monitor_new_slice(&ctors).ok()?;

            let mut args_in_comp: Vec<_> = args
                .iter()
                .map(|arg| self.monitor_new_slice(arg.to_bytes_with_nul()).unwrap())
                .collect();

            if args_in_comp.is_empty() {
                // No args were provided, so use the compartment name as argv[0]
                // (nul-terminated, like the other arguments).
                let cname = CString::new(self.name.as_bytes()).unwrap();
                args_in_comp = vec![self.monitor_new_slice(cname.as_bytes_with_nul()).unwrap()];
            }
            let argc = args_in_comp.len();

            let mut envs_in_comp: Vec<_> = env
                .iter()
                .map(|arg| self.monitor_new_slice(arg.to_bytes_with_nul()).unwrap())
                .collect();

            // Null-terminate the argv and envp arrays.
            args_in_comp.push(core::ptr::null_mut());
            envs_in_comp.push(core::ptr::null_mut());

            let args_in_comp_in_comp = self.monitor_new_slice(&args_in_comp).unwrap();
            let envs_in_comp_in_comp = self.monitor_new_slice(&envs_in_comp).unwrap();

            let comp_init_info = CompartmentInitInfo {
                ctor_set_array: ctors_in_comp,
                ctor_set_len: ctors.len(),
                comp_config_info: comp_config_info.cast(),
            };
            let comp_init_info_in_comp = self.monitor_new(comp_init_info).ok()?;
            let rtinfo = RuntimeInfo {
                flags: 0,
                kind: RUNTIME_INIT_COMP,
                args: args_in_comp_in_comp.cast(),
                argc,
                envp: envs_in_comp_in_comp.cast(),
                init_info: InitInfoPtrs {
                    comp: comp_init_info_in_comp,
                },
            };
            self.monitor_new(rtinfo).ok()
        };
        let arg = match build_init_info() {
            Some(arg) => arg as usize,
            None => {
                self.set_flag(COMP_EXITED);
                return None;
            }
        };
        if self.build_tls_template(dynlink).is_none() {
            self.set_flag(COMP_EXITED);
            return None;
        }

        // Create the main thread on the provided stack, entering at `entry` with the runtime
        // info pointer as its argument.
        let mt = match CompThread::new(
            tmgr,
            dynlink,
            stack,
            self.instance,
            Some(self.instance),
            entry,
            arg,
            suspend_on_start,
        ) {
            Ok(mt) => mt,
            Err(_) => {
                self.set_flag(COMP_EXITED);
                return None;
            }
        };
        self.main = Some(mt);
        self.notify_state_changed();

        Some(true)
    }

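    /// Build the TLS template for this compartment, allocating the TLS region from the
    /// per-compartment allocator and publishing the resulting template in the compartment's
    /// shared config.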
    fn build_tls_template(&mut self, dynlink: &mut Context) -> Option<()> {
        let region = dynlink
            .get_compartment_mut(self.compartment_id)
            .unwrap()
            .build_tls_region(RuntimeThreadControl::default(), |layout| {
                unsafe { self.alloc.malloc(layout) }.ok()
            })
            .ok()?;

        let template: TlsTemplateInfo = region.into();
        let tls_template = self.monitor_new(template).ok()?;

        let config = self.comp_config_object.read_comp_config();
        config.set_tls_template(tls_template);
        self.comp_config_object.write_config(config);
        Some(())
    }

    /// Read the error code of the compartment's main thread, or 0 if there is no main thread.
    #[allow(dead_code)]
    pub fn read_error_code(&self) -> u64 {
        let Some(ref main) = self.main else {
            return 0;
        };
        main.thread.repr.get_repr().get_code()
    }

    /// Get information about the nth thread in this compartment. Index 0 is the main thread;
    /// subsequent indices walk the remaining tracked threads.
    pub fn get_nth_thread_info(&self, n: usize) -> Option<ThreadInfo> {
        let Some(ref main) = self.main else {
            return None;
        };
        if n == 0 {
            return Some(ThreadInfo {
                repr_id: main.thread.id,
            });
        }
        self.per_thread
            .keys()
            .filter(|t| **t != main.thread.id)
            .nth(n - 1)
            .map(|id| ThreadInfo { repr_id: *id })
    }

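    /// Handle a monitor upcall on behalf of a thread in this compartment. When the compartment
    /// is being debugged, the thread is suspended so a debugger can inspect it; otherwise the
    /// event is just logged.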
    pub fn upcall_handle(
        &self,
        frame: &mut UpcallFrame,
        info: &UpcallData,
    ) -> Result<Option<ResumeFlags>, TwzError> {
        let flags = if self.is_debugging {
            tracing::info!("got monitor upcall {:?} {:?}", frame, info);
            Some(ResumeFlags::SUSPEND)
        } else {
            tracing::warn!(
                "supervisor exception in {}, thread {}: {:?}",
                self.name,
                info.thread_id,
                info.info
            );
            None
        };
        Ok(flags)
    }

    /// Increment this compartment's use count.
    pub(crate) fn inc_use_count(&mut self) {
        self.use_count += 1;
        tracing::trace!(
            "compartment {} inc use count -> {}",
            self.name,
            self.use_count
        );
    }

    /// Decrement this compartment's use count, returning true if it reached zero. When the
    /// count hits zero, COMP_THREAD_CAN_EXIT is set so the compartment's threads may exit.
    pub(crate) fn dec_use_count(&mut self) -> bool {
        debug_assert!(self.use_count > 0);
        self.use_count -= 1;

        tracing::trace!(
            "compartment {} dec use count -> {}",
            self.name,
            self.use_count
        );
        let z = self.use_count == 0;
        if z {
            self.set_flag(COMP_THREAD_CAN_EXIT);
        }
        z
    }
}