secgate/lib.rs

#![feature(fn_traits)]
#![feature(unboxed_closures)]
#![feature(tuple_trait)]
#![feature(naked_functions)]
#![feature(auto_traits)]
#![feature(negative_impls)]
#![feature(linkage)]
#![feature(maybe_uninit_as_bytes)]
#![feature(thread_local)]

use core::ffi::{c_char, CStr};
use std::{
    cell::UnsafeCell,
    fmt::Debug,
    marker::{PhantomData, Tuple},
    mem::MaybeUninit,
    sync::OnceLock,
};

pub use secgate_macros::*;
use twizzler_abi::object::ObjID;
use twizzler_rt_abi::error::{ResourceError, TwzError};

pub mod util;

/// A struct of information about a secure gate. These are auto-generated by the
/// [crate::secure_gate] macro, and stored in a special ELF section (.twz_secgate_info) as an array.
/// The dynamic linker and monitor can then use this to easily enumerate gates.
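///
/// # Example
/// A sketch of what one hand-written entry could look like; `my_gate_impl`, the gate name, and
/// the `#[link_section]` usage here are hypothetical, since the real entries are emitted by the
/// [crate::secure_gate] macro:
/// ```ignore
/// extern "C" fn my_gate_impl() {}
///
/// #[used]
/// #[link_section = ".twz_secgate_info"]
/// static MY_GATE: SecGateInfo<extern "C" fn()> =
///     SecGateInfo::new(my_gate_impl, c"my_gate");
/// ```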
#[repr(C)]
pub struct SecGateInfo<F> {
    /// A pointer to the implementation entry function. This must be a pointer, and we statically
    /// check that it has the same size as usize (sorry cheri, we'll fix this another time).
    pub imp: F,
    /// The name of this secure gate. This must be a pointer to a null-terminated C string.
    name: *const c_char,
}

impl<F> core::fmt::Debug for SecGateInfo<F> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "SecGateInfo({:p})", self.name)
    }
}

impl<F> SecGateInfo<F> {
    pub const fn new(imp: F, name: &'static CStr) -> Self {
        Self {
            imp,
            name: name.as_ptr(),
        }
    }

    pub fn name(&self) -> &CStr {
        // Safety: we only ever construct self from a static CStr.
        unsafe { CStr::from_ptr(self.name) }
    }
}

// Safety: If F is Send, we are too, because the name field points to a static C string that cannot
// be written to.
unsafe impl<F: Send> Send for SecGateInfo<F> {}
// Safety: If F is Sync, we are too, because the name field points to a static C string that cannot
// be written to.
unsafe impl<F: Sync> Sync for SecGateInfo<F> {}

/// Minimum alignment of secure trampolines.
pub const SECGATE_TRAMPOLINE_ALIGN: usize = 0x10;

/// Non-generic and non-pointer-based SecGateInfo, for use during dynamic linking.
pub type RawSecGateInfo = SecGateInfo<usize>;
// Ensure that these are the same size, because the dynamic linker uses the raw variant.
static_assertions::assert_eq_size!(RawSecGateInfo, SecGateInfo<&fn()>);

/// Arguments that will be passed to the secure call. Concrete versions of this are generated by the
/// macro.
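///
/// # Example
/// A sketch of stack-allocating arguments for a hypothetical gate taking `(u32, bool)`:
/// ```ignore
/// let v = Arguments::with_alloca((7u32, true), |args| {
///     // A real caller would pass `args as *const _` to a trampoline; here we just read
///     // the arguments back out for illustration.
///     let (x, flag) = args.into_inner();
///     if flag { x + 1 } else { x }
/// });
/// assert_eq!(v, 8);
/// ```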
#[derive(Clone, Copy)]
#[repr(C)]
pub struct Arguments<Args: Tuple + Crossing + Copy> {
    args: Args,
}

impl<Args: Tuple + Crossing + Copy> Arguments<Args> {
    pub fn with_alloca<F, R>(args: Args, f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        alloca::alloca(|stack_space| {
            stack_space.write(Self { args });
            // Safety: we init the MaybeUninit just above.
            f(unsafe { stack_space.assume_init_mut() })
        })
    }

    pub fn into_inner(self) -> Args {
        self.args
    }
}

/// Return value to be filled by the secure call. Concrete versions of this are generated by the
/// macro.
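///
/// # Example
/// A sketch of the set/read protocol, using a hypothetical `u32` payload:
/// ```ignore
/// let empty = Return::<u32>::new_uninit();
/// assert!(empty.into_inner().is_none());
///
/// let mut ret = Return::<u32>::new_uninit();
/// ret.set(42);
/// assert_eq!(ret.into_inner(), Some(42));
/// ```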
#[derive(Copy)]
#[repr(C)]
pub struct Return<T: Crossing + Copy> {
    isset: bool,
    ret: MaybeUninit<T>,
}

impl<T: Copy + Crossing> Clone for Return<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: Crossing + Copy> Return<T> {
    pub fn with_alloca<F, R>(f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        alloca::alloca(|stack_space| {
            stack_space.write(Self {
                isset: false,
                ret: MaybeUninit::uninit(),
            });
            // Safety: we init the MaybeUninit just above.
            f(unsafe { stack_space.assume_init_mut() })
        })
    }

    /// If a previous call to set() has been made, then into_inner returns the inner value.
    /// Otherwise, returns None.
    pub fn into_inner(self) -> Option<T> {
        if self.isset {
            Some(unsafe { self.ret.assume_init() })
        } else {
            None
        }
    }

    /// Construct a new, uninitialized Self.
    pub fn new_uninit() -> Self {
        Self {
            isset: false,
            ret: MaybeUninit::uninit(),
        }
    }

    /// Set the inner value. Future calls to into_inner will return Some(val).
    pub fn set(&mut self, val: T) {
        self.ret.write(val);
        self.isset = true;
    }
}

/// An auto trait that limits the types that can be sent across to another compartment. These are:
/// 1. Types other than references, UnsafeCell, raw pointers, and slices.
/// 2. #[repr(C)] structs and enums made from Crossing types.
///
/// # Safety
/// The type must meet the above requirements.
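///
/// # Example
/// A sketch using a hypothetical `#[repr(C)]` type; plain value types cross, references do not:
/// ```ignore
/// #[derive(Clone, Copy)]
/// #[repr(C)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
///
/// fn assert_crossing<T: Crossing>() {}
///
/// assert_crossing::<Point>(); // ok: built only from Crossing types
/// // assert_crossing::<&Point>(); // does not compile: references cannot cross
/// ```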
pub unsafe auto trait Crossing {}

impl<T> !Crossing for &T {}
impl<T> !Crossing for &mut T {}
impl<T: ?Sized> !Crossing for UnsafeCell<T> {}
impl<T> !Crossing for *const T {}
impl<T> !Crossing for *mut T {}
impl<T> !Crossing for &[T] {}
impl<T> !Crossing for &mut [T] {}

unsafe impl<T: Crossing + Copy> Crossing for Result<T, TwzError> {}

/// This must be included in your source if you call any secure gates.
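///
/// # Example
/// ```ignore
/// // Once, at the crate root of any crate that calls secure gates (crate name assumed
/// // to be `secgate`):
/// secgate::secgate_prelude!();
/// ```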
// TODO: this isn't ideal, but it's the only solution I have at the moment. For some reason,
// the linker doesn't even bother linking the libcalloca.a library that alloca creates. This forces
// that to happen.
#[macro_export]
macro_rules! secgate_prelude {
    () => {
        #[link(name = "calloca", kind = "static")]
        extern "C" {
            pub fn c_with_alloca();
        }
    };
}

/// Information about a gate call: the calling thread and the source security context.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq, Hash)]
#[repr(C)]
pub struct GateCallInfo {
    thread_id: ObjID,
    src_ctx: ObjID,
}

impl GateCallInfo {
    /// Allocate a new GateCallInfo on the stack for the closure.
    pub fn with_alloca<F, R>(thread_id: ObjID, src_ctx: ObjID, f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        alloca::alloca(|stack_space| {
            stack_space.write(Self { thread_id, src_ctx });
            // Safety: we init the MaybeUninit just above.
            f(unsafe { stack_space.assume_init_mut() })
        })
    }

    /// Get the ID of the source context, or None if the call was not cross-context.
    pub fn source_context(&self) -> Option<ObjID> {
        if self.src_ctx.raw() == 0 {
            None
        } else {
            Some(self.src_ctx)
        }
    }

    /// Get the ID of the calling thread.
    pub fn thread_id(&self) -> ObjID {
        if self.thread_id.raw() == 0 {
            twizzler_abi::syscall::sys_thread_self_id()
        } else {
            self.thread_id
        }
    }

    /// Ensures that the data is filled out (may read thread ID from kernel if necessary).
    pub fn canonicalize(self) -> Self {
        Self {
            thread_id: self.thread_id(),
            src_ctx: self.src_ctx,
        }
    }
}
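
// Sketch of the receiving side of a gate call, mirroring the three pointers that
// `dynamic_gate_call` (below) hands to a trampoline: call info, arguments, and the return slot.
// This is illustrative only; the real entry points and their exact signatures are generated by
// the `secure_gate` macro, and `example_entry` is a hypothetical name.
//
//     unsafe extern "C" fn example_entry(
//         info: *const GateCallInfo,
//         args: *const Arguments<(u32,)>,
//         ret: *mut Return<Result<u32, TwzError>>,
//     ) {
//         let info = (*info).canonicalize();
//         let _caller = info.thread_id();
//         let (x,) = (*args).into_inner();
//         (*ret).set(Ok(x + 1));
//     }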

/// Read the raw thread pointer (the fs base register on x86_64, tpidr_el0 elsewhere).
fn get_tp() -> usize {
    let mut val: usize;
    unsafe {
        #[cfg(target_arch = "x86_64")]
        core::arch::asm!("rdfsbase {}", out(reg) val);
        #[cfg(not(target_arch = "x86_64"))]
        core::arch::asm!("mrs {}, tpidr_el0", out(reg) val);
    }
    val
}

/// Get the ID of the calling thread. The value is cached in a thread-local once TLS is set up;
/// before that, it is read from the kernel each time.
pub fn get_thread_id() -> ObjID {
    #[thread_local]
    static ONCE_ID: OnceLock<ObjID> = OnceLock::new();
    if get_tp() != 0 {
        *ONCE_ID.get_or_init(|| twizzler_abi::syscall::sys_thread_self_id())
    } else {
        twizzler_abi::syscall::sys_thread_self_id()
    }
}

/// Get the ID of the active security context. The value is cached in a thread-local once TLS is
/// set up; before that, it is read from the kernel each time.
pub fn get_sctx_id() -> ObjID {
    #[thread_local]
    static ONCE_ID: OnceLock<ObjID> = OnceLock::new();
    if get_tp() != 0 {
        *ONCE_ID.get_or_init(|| twizzler_abi::syscall::sys_thread_active_sctx_id())
    } else {
        twizzler_abi::syscall::sys_thread_active_sctx_id()
    }
}

/// Call into the runtime's cross-compartment entry hook.
pub fn runtime_preentry() -> Result<(), TwzError> {
    twizzler_rt_abi::core::twz_rt_cross_compartment_entry()
}

/// Per-thread state (thread pointer and active security context) saved across a gate call.
pub struct SecFrame {
    tp: usize,
    sctx: ObjID,
}

/// Capture the current thread pointer and active security context into a [SecFrame].
pub fn frame() -> SecFrame {
    let tp = get_tp();
    // TODO: do this without calling the kernel.
    let sctx = twizzler_abi::syscall::sys_thread_active_sctx_id();
    SecFrame { tp, sctx }
}

/// Restore a previously captured [SecFrame]: reset the TLS pointer (if it was set) and the
/// active security context.
pub fn restore_frame(frame: SecFrame) {
    if frame.tp != 0 {
        twizzler_abi::syscall::sys_thread_settls(frame.tp as u64);
    }
    twizzler_abi::syscall::sys_thread_set_active_sctx_id(frame.sctx).unwrap();
}

/// A callable handle to a secure gate trampoline resolved at runtime by address. Implements the
/// Fn traits, so it can be called like a function; calls go through [dynamic_gate_call].
#[derive(Clone, Copy)]
pub struct DynamicSecGate<'comp, A, R> {
    address: usize,
    _pd: PhantomData<&'comp (A, R)>,
}

impl<'a, A: Tuple + Crossing + Copy, R: Crossing + Copy> Fn<A> for DynamicSecGate<'a, A, R> {
    extern "rust-call" fn call(&self, args: A) -> Self::Output {
        unsafe { dynamic_gate_call(*self, args) }
    }
}

impl<'a, A: Tuple + Crossing + Copy, R: Crossing + Copy> FnMut<A> for DynamicSecGate<'a, A, R> {
    extern "rust-call" fn call_mut(&mut self, args: A) -> Self::Output {
        unsafe { dynamic_gate_call(*self, args) }
    }
}

impl<'a, A: Tuple + Crossing + Copy, R: Crossing + Copy> FnOnce<A> for DynamicSecGate<'a, A, R> {
    type Output = Result<R, TwzError>;

    extern "rust-call" fn call_once(self, args: A) -> Self::Output {
        unsafe { dynamic_gate_call(self, args) }
    }
}

impl<'a, A, R> Debug for DynamicSecGate<'a, A, R> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "DynamicSecGate [{} -> {}] {{ address: {:x} }}",
            std::any::type_name::<A>(),
            std::any::type_name::<R>(),
            self.address
        )
    }
}

impl<'comp, A, R> DynamicSecGate<'comp, A, R> {
    /// Construct a new DynamicSecGate from the address of a gate trampoline.
    ///
    /// # Safety
    /// The address must point to a secure gate trampoline that takes arguments A and returns R.
    pub unsafe fn new(address: usize) -> Self {
        Self {
            address,
            _pd: PhantomData,
        }
    }
}
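
// Sketch of dynamic use, assuming `addr` holds the address of a gate trampoline (for example,
// one found by enumerating RawSecGateInfo entries during dynamic linking). The (u32,) -> u32
// signature is hypothetical.
//
//     let gate: DynamicSecGate<(u32,), u32> = unsafe { DynamicSecGate::new(addr) };
//     let result: Result<u32, TwzError> = gate(7u32);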

/// Call the secure gate trampoline at `target.address`, passing pointers to stack-allocated
/// call info, arguments, and a return slot, and restoring the caller's frame afterwards.
///
/// # Safety
/// The target must refer to a valid secure gate trampoline that takes arguments A and returns R.
pub unsafe fn dynamic_gate_call<A: Tuple + Crossing + Copy, R: Crossing + Copy>(
    target: DynamicSecGate<A, R>,
    args: A,
) -> Result<R, TwzError> {
    let frame = frame();
    // Allocate stack space for info + args + ret. The with_alloca helpers also init the memory.
    let ret = GateCallInfo::with_alloca(get_thread_id(), get_sctx_id(), |info| {
        Arguments::<A>::with_alloca(args, |args| {
            Return::<Result<R, TwzError>>::with_alloca(|ret| {
                // Call the trampoline at the target address.
                unsafe {
                    #[cfg(target_arch = "x86_64")]
                    core::arch::asm!("call {target}", target = in(reg) target.address, in("rdi") info as *const _, in("rsi") args as *const _, in("rdx") ret as *mut _, clobber_abi("C"));
                    #[cfg(not(target_arch = "x86_64"))]
                    todo!()
                }
                ret.into_inner()
            })
        })
    });
    restore_frame(frame);
    // If the gate never set a return value, report it as unavailable.
    ret.ok_or(ResourceError::Unavailable)?
}