secgate/lib.rs

#![feature(fn_traits)]
#![feature(unboxed_closures)]
#![feature(tuple_trait)]
#![feature(naked_functions)]
#![feature(auto_traits)]
#![feature(negative_impls)]
#![feature(linkage)]
#![feature(maybe_uninit_as_bytes)]

use core::ffi::{c_char, CStr};
use std::{
    cell::UnsafeCell,
    fmt::Debug,
    marker::{PhantomData, Tuple},
    mem::MaybeUninit,
};

pub use secgate_macros::*;
use twizzler_abi::object::ObjID;
use twizzler_rt_abi::error::{ResourceError, TwzError};

pub mod util;

/// A struct of information about a secure gate. These are auto-generated by the
/// [crate::secure_gate] macro, and stored in a special ELF section (.twz_secgate_info) as an array.
/// The dynamic linker and monitor can then use this to easily enumerate gates.
#[repr(C)]
pub struct SecGateInfo<F> {
    /// A pointer to the implementation entry function. This must be a pointer, and we statically
    /// check that it has the same size as usize (sorry CHERI, we'll fix this another time).
    pub imp: F,
    /// The name of this secure gate. This must be a pointer to a null-terminated C string.
    name: *const c_char,
}

impl<F> core::fmt::Debug for SecGateInfo<F> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "SecGateInfo({:p})", self.name)
    }
}

impl<F> SecGateInfo<F> {
    pub const fn new(imp: F, name: &'static CStr) -> Self {
        Self {
            imp,
            name: name.as_ptr(),
        }
    }

    pub fn name(&self) -> &CStr {
        // Safety: we only ever construct self from a static CStr.
        unsafe { CStr::from_ptr(self.name) }
    }
}

// Safety: If F is Send, we are too because the name field points to a static C string that cannot
// be written to.
unsafe impl<F: Send> Send for SecGateInfo<F> {}
// Safety: If F is Sync, we are too because the name field points to a static C string that cannot
// be written to.
unsafe impl<F: Sync> Sync for SecGateInfo<F> {}
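// A minimal illustrative sketch (not part of the generated gate tables): constructing a
// SecGateInfo by hand around a hypothetical entry function and reading its name back.
#[cfg(test)]
mod secgate_info_example {
    use super::SecGateInfo;

    #[test]
    fn construct_and_read_name() {
        // Hypothetical entry point; real entries are emitted by the secure_gate macro.
        extern "C" fn example_entry() {}
        let info = SecGateInfo::new(example_entry as usize, c"example_gate");
        assert_eq!(info.name().to_str().unwrap(), "example_gate");
    }
}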
/// Minimum alignment of secure trampolines.
pub const SECGATE_TRAMPOLINE_ALIGN: usize = 0x10;

/// Non-generic and non-pointer-based SecGateInfo, for use during dynamic linking.
pub type RawSecGateInfo = SecGateInfo<usize>;
// Ensure that these are the same size because the dynamic linker uses the raw variant.
static_assertions::assert_eq_size!(RawSecGateInfo, SecGateInfo<&fn()>);

/// Arguments that will be passed to the secure call. Concrete versions of this are generated by the
/// macro.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct Arguments<Args: Tuple + Crossing + Copy> {
    args: Args,
}

impl<Args: Tuple + Crossing + Copy> Arguments<Args> {
    pub fn with_alloca<F, R>(args: Args, f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        alloca::alloca(|stack_space| {
            stack_space.write(Self { args });
            // Safety: we init the MaybeUninit just above.
            f(unsafe { stack_space.assume_init_mut() })
        })
    }

    pub fn into_inner(self) -> Args {
        self.args
    }
}
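// Illustrative sketch: wrapping a plain Copy tuple in Arguments on stack-allocated space and
// unwrapping it inside the closure. The (u32, u32) payload is just an example Crossing type.
#[cfg(test)]
mod arguments_example {
    use super::Arguments;

    #[test]
    fn roundtrip_through_alloca() {
        let sum = Arguments::with_alloca((1u32, 2u32), |args| {
            let (a, b) = args.into_inner();
            a + b
        });
        assert_eq!(sum, 3);
    }
}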
/// Return value to be filled by the secure call. Concrete versions of this are generated by the
/// macro.
#[derive(Copy)]
#[repr(C)]
pub struct Return<T: Crossing + Copy> {
    isset: bool,
    ret: MaybeUninit<T>,
}

impl<T: Copy + Crossing> Clone for Return<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: Crossing + Copy> Return<T> {
    pub fn with_alloca<F, R>(f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        alloca::alloca(|stack_space| {
            stack_space.write(Self {
                isset: false,
                ret: MaybeUninit::uninit(),
            });
            // Safety: we init the MaybeUninit just above.
            f(unsafe { stack_space.assume_init_mut() })
        })
    }

    /// If set has been called on this value, into_inner returns Some of the inner value.
    /// Otherwise, it returns None.
    pub fn into_inner(self) -> Option<T> {
        if self.isset {
            Some(unsafe { self.ret.assume_init() })
        } else {
            None
        }
    }

    /// Construct a new, uninitialized Self.
    pub fn new_uninit() -> Self {
        Self {
            isset: false,
            ret: MaybeUninit::uninit(),
        }
    }

    /// Set the inner value. Future calls to into_inner will return Some(val).
    pub fn set(&mut self, val: T) {
        self.ret.write(val);
        self.isset = true;
    }
}
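// Illustrative sketch of the set/into_inner protocol: a Return starts unset and yields None
// until set is called. The u32 payload is just an example Crossing + Copy type.
#[cfg(test)]
mod return_example {
    use super::Return;

    #[test]
    fn unset_then_set() {
        let mut ret = Return::<u32>::new_uninit();
        assert!(ret.into_inner().is_none());
        ret.set(7);
        assert_eq!(ret.into_inner(), Some(7));
    }
}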
/// An auto trait that limits the types that can be sent across to another compartment. These are:
/// 1. Types other than references, UnsafeCell, raw pointers, and slices.
/// 2. #[repr(C)] structs and enums made from Crossing types.
///
/// # Safety
/// The type must meet the above requirements.
pub unsafe auto trait Crossing {}

impl<T> !Crossing for &T {}
impl<T> !Crossing for &mut T {}
impl<T: ?Sized> !Crossing for UnsafeCell<T> {}
impl<T> !Crossing for *const T {}
impl<T> !Crossing for *mut T {}
impl<T> !Crossing for &[T] {}
impl<T> !Crossing for &mut [T] {}

unsafe impl<T: Crossing + Copy> Crossing for Result<T, TwzError> {}
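// Illustrative sketch of what the Crossing auto trait admits: a #[repr(C)] struct of plain
// Crossing fields crosses automatically, while reference and pointer types are rejected by the
// negative impls above. The Point type is hypothetical.
#[cfg(test)]
mod crossing_example {
    use super::Crossing;

    #[repr(C)]
    #[derive(Clone, Copy)]
    #[allow(dead_code)]
    struct Point {
        x: u32,
        y: u32,
    }

    fn assert_crossing<T: Crossing>() {}

    #[test]
    fn plain_data_crosses() {
        assert_crossing::<Point>();
        assert_crossing::<(u64, bool)>();
        // assert_crossing::<&u32>(); // would not compile: references are !Crossing
    }
}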
/// You must include this in your source if you call any secure gates.
// TODO: this isn't ideal, but it's the only solution I have at the moment. For some reason,
// the linker doesn't even bother linking the libcalloca.a library that alloca creates. This forces
// that to happen.
#[macro_export]
macro_rules! secgate_prelude {
    () => {
        #[link(name = "calloca", kind = "static")]
        extern "C" {
            pub fn c_with_alloca();
        }
    };
}
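// Illustrative client-side usage (hypothetical caller crate): a crate that invokes secure gates
// expands the prelude once so that libcalloca.a is actually pulled into the link:
//
//     secgate::secgate_prelude!();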
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq, Hash)]
#[repr(C)]
pub struct GateCallInfo {
    thread_id: ObjID,
    src_ctx: ObjID,
}

impl GateCallInfo {
    /// Allocate a new GateCallInfo on the stack for the closure.
    pub fn with_alloca<F, R>(thread_id: ObjID, src_ctx: ObjID, f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        alloca::alloca(|stack_space| {
            stack_space.write(Self { thread_id, src_ctx });
            // Safety: we init the MaybeUninit just above.
            f(unsafe { stack_space.assume_init_mut() })
        })
    }

    /// Get the ID of the source context, or None if the call was not cross-context.
    pub fn source_context(&self) -> Option<ObjID> {
        if self.src_ctx.raw() == 0 {
            None
        } else {
            Some(self.src_ctx)
        }
    }

    /// Get the ID of the calling thread.
    pub fn thread_id(&self) -> ObjID {
        if self.thread_id.raw() == 0 {
            twizzler_abi::syscall::sys_thread_self_id()
        } else {
            self.thread_id
        }
    }

    /// Ensures that the data is filled out (may read thread ID from kernel if necessary).
    pub fn canonicalize(self) -> Self {
        Self {
            thread_id: self.thread_id(),
            src_ctx: self.src_ctx,
        }
    }
}

/// Get the calling thread's ID.
pub fn get_thread_id() -> ObjID {
    twizzler_abi::syscall::sys_thread_self_id()
}

/// Get the calling thread's active security context ID.
pub fn get_sctx_id() -> ObjID {
    twizzler_abi::syscall::sys_thread_active_sctx_id()
}

/// Call the runtime's cross-compartment entry hook.
pub fn runtime_preentry() -> Result<(), TwzError> {
    twizzler_rt_abi::core::twz_rt_cross_compartment_entry()
}

/// State saved across a secure gate call: the thread pointer (TLS) and the active security
/// context ID.
pub struct SecFrame {
    tp: usize,
    sctx: ObjID,
}

/// Capture the current thread pointer and active security context so they can be restored after
/// a gate call returns.
pub fn frame() -> SecFrame {
    let mut val: usize;
    unsafe {
        #[cfg(target_arch = "x86_64")]
        core::arch::asm!("rdfsbase {}", out(reg) val);
        #[cfg(not(target_arch = "x86_64"))]
        core::arch::asm!("mrs {}, tpidr_el0", out(reg) val);
    }
    // TODO: do this without calling the kernel.
    let sctx = twizzler_abi::syscall::sys_thread_active_sctx_id();
    SecFrame { tp: val, sctx }
}

/// Restore a previously captured SecFrame: reset the TLS pointer (if it was non-zero) and the
/// active security context.
pub fn restore_frame(frame: SecFrame) {
    if frame.tp != 0 {
        twizzler_abi::syscall::sys_thread_settls(frame.tp as u64);
    }
    twizzler_abi::syscall::sys_thread_set_active_sctx_id(frame.sctx).unwrap();
}
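// Illustrative sketch (hypothetical helper, not part of the public API): the save/restore
// discipline around a cross-compartment call, which dynamic_gate_call below follows internally.
#[allow(dead_code)]
fn frame_discipline_sketch<R>(body: impl FnOnce() -> R) -> R {
    // Capture the caller's TLS pointer and active security context.
    let saved = frame();
    // ... perform the gate call ...
    let out = body();
    // Restore both on the way back into the caller's compartment.
    restore_frame(saved);
    out
}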
/// A handle to a secure gate resolved at runtime (for example, by the dynamic linker), typed by
/// its argument tuple A and return type R. The Fn-family impls below let it be called like an
/// ordinary function.
#[derive(Clone, Copy)]
pub struct DynamicSecGate<'comp, A, R> {
    address: usize,
    _pd: PhantomData<&'comp (A, R)>,
}

impl<'a, A: Tuple + Crossing + Copy, R: Crossing + Copy> Fn<A> for DynamicSecGate<'a, A, R> {
    extern "rust-call" fn call(&self, args: A) -> Self::Output {
        unsafe { dynamic_gate_call(*self, args) }
    }
}

impl<'a, A: Tuple + Crossing + Copy, R: Crossing + Copy> FnMut<A> for DynamicSecGate<'a, A, R> {
    extern "rust-call" fn call_mut(&mut self, args: A) -> Self::Output {
        unsafe { dynamic_gate_call(*self, args) }
    }
}

impl<'a, A: Tuple + Crossing + Copy, R: Crossing + Copy> FnOnce<A> for DynamicSecGate<'a, A, R> {
    type Output = Result<R, TwzError>;

    extern "rust-call" fn call_once(self, args: A) -> Self::Output {
        unsafe { dynamic_gate_call(self, args) }
    }
}

impl<'a, A, R> Debug for DynamicSecGate<'a, A, R> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "DynamicSecGate [{} -> {}] {{ address: {:x} }}",
            std::any::type_name::<A>(),
            std::any::type_name::<R>(),
            self.address
        )
    }
}

impl<'comp, A, R> DynamicSecGate<'comp, A, R> {
    /// Construct a new DynamicSecGate from a raw trampoline address.
    ///
    /// # Safety
    /// The address must point to a secure gate trampoline whose signature matches (A, R).
    pub unsafe fn new(address: usize) -> Self {
        Self {
            address,
            _pd: PhantomData,
        }
    }
}
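// Illustrative sketch (hypothetical caller, not part of the public API): once a trampoline
// address has been resolved, a DynamicSecGate can be constructed over it and invoked with
// ordinary call syntax. The (u32, u32) -> u32 signature is just an example.
#[allow(dead_code)]
fn dynamic_gate_call_sketch(trampoline_addr: usize) -> Result<u32, TwzError> {
    // Safety: the caller must guarantee trampoline_addr points at a real secure-gate trampoline
    // with a matching ((u32, u32)) -> u32 signature.
    let gate: DynamicSecGate<'_, (u32, u32), u32> = unsafe { DynamicSecGate::new(trampoline_addr) };
    // The Fn impls make the gate callable like a plain function; this performs the full
    // cross-compartment call via dynamic_gate_call.
    gate(1, 2)
}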
/// Perform a dynamic secure gate call through the trampoline referenced by target, passing args
/// and returning the gate's result.
///
/// # Safety
/// The target must refer to a valid secure gate trampoline whose signature matches (A, R).
pub unsafe fn dynamic_gate_call<A: Tuple + Crossing + Copy, R: Crossing + Copy>(
    target: DynamicSecGate<A, R>,
    args: A,
) -> Result<R, TwzError> {
    let frame = frame();
    // Allocate stack space for args + ret. Args::with_alloca also inits the memory.
    let ret = GateCallInfo::with_alloca(get_thread_id(), get_sctx_id(), |info| {
        Arguments::<A>::with_alloca(args, |args| {
            Return::<Result<R, TwzError>>::with_alloca(|ret| {
                // Call the trampoline in the mod.
                unsafe {
                    //#mod_name::#trampoline_name_without_prefix(info as *const _, args as *const _, ret as *mut _);
                    #[cfg(target_arch = "x86_64")]
                    core::arch::asm!("call {target}", target = in(reg) target.address, in("rdi") info as *const _, in("rsi") args as *const _, in("rdx") ret as *mut _, clobber_abi("C"));
                    #[cfg(not(target_arch = "x86_64"))]
                    todo!()
                }
                ret.into_inner()
            })
        })
    });
    restore_frame(frame);
    ret.ok_or(ResourceError::Unavailable)?
}