use core::{marker::PhantomData, ops::Range};
use std::sync::Arc;
use twizzler_abi::{
kso::{
pack_kaction_pin_start_and_len, unpack_kaction_pin_token_and_len, KactionCmd, KactionFlags,
KactionGenericCmd,
},
object::NULLPAGE_SIZE,
syscall::{sys_kaction, PinnedPage},
};
use super::{
pin::{PhysInfo, PinError},
pool::{AllocatableDmaObject, SplitPageRange},
Access, DeviceSync, DmaObject, DmaOptions, DmaPin, SyncMode,
};
use crate::arch::DMA_PAGE_SIZE;
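/// A region of DMA-capable memory, mapped in virtual memory as a single value of type `T`. The
/// region records its [`Access`] direction and [`DmaOptions`], and, once pinned, the physical
/// pages backing it.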
pub struct DmaRegion<T: DeviceSync> {
virt: *mut u8,
backing: Option<(Vec<PhysInfo>, u32)>,
len: usize,
access: Access,
pool: Option<(Arc<AllocatableDmaObject>, SplitPageRange)>,
options: DmaOptions,
offset: usize,
_pd: PhantomData<T>,
}
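/// A region of DMA-capable memory, mapped in virtual memory as a slice of values of type `T`.
/// Wraps a [`DmaRegion`] and adds element-wise accessors.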
pub struct DmaSliceRegion<T: DeviceSync> {
region: DmaRegion<T>,
len: usize,
}
impl<T: DeviceSync> DmaRegion<T> {
pub(super) fn new(
len: usize,
access: Access,
options: DmaOptions,
offset: usize,
pool: Option<(Arc<AllocatableDmaObject>, SplitPageRange)>,
) -> Self {
Self {
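            // Compute the region's virtual address: start from the object's base mapping (which
            // points just past the null page), advance by the object-relative offset, and step
            // back by NULLPAGE_SIZE so that `offset` is interpreted from the start of the object.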
virt: unsafe {
(pool
.as_ref()
.unwrap()
.0
.dma_object()
.object()
.base_mut_unchecked() as *mut () as *mut u8)
.add(offset)
.sub(NULLPAGE_SIZE)
},
len,
access,
options,
backing: None,
pool,
offset,
_pd: PhantomData,
}
}
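    /// Initialize the region by volatile-writing `init` into it, then perform a full coherence
    /// sync.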
pub(super) fn fill(&mut self, init: T) {
let p = self.virt as *mut T;
        unsafe {
            p.write_volatile(init);
        }
        self.sync(SyncMode::FullCoherence);
}
fn dma_object(&self) -> &DmaObject {
self.pool.as_ref().unwrap().0.dma_object()
}
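    /// Return the number of DMA pages spanned by this region (length rounded up to the DMA page
    /// size).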
pub fn nr_pages(&self) -> usize {
(self.len - 1) / DMA_PAGE_SIZE + 1
}
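    // Pin the region's pages via the kernel and record the resulting physical addresses and pin
    // token, if that has not already been done.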
fn setup_backing(&mut self) -> Result<(), PinError> {
if self.backing.is_some() {
return Ok(());
}
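        // One PinnedPage slot per DMA page, to be filled in by the kernel.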
let mut pins = Vec::new();
let len = self.nr_pages();
pins.resize(len, PinnedPage::new(0));
let start = (self.offset / DMA_PAGE_SIZE) as u64;
        let ptr = pins.as_ptr() as u64;
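        // Ask the kernel to pin this range of the object's pages. The result packs a pin token
        // together with the number of pages actually pinned.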
let res = sys_kaction(
KactionCmd::Generic(KactionGenericCmd::PinPages(0)),
Some(self.dma_object().object().id()),
ptr,
pack_kaction_pin_start_and_len(start, len).ok_or(PinError::InternalError)?,
KactionFlags::empty(),
)
.map_err(|_| PinError::InternalError)?
.u64()
.ok_or(PinError::InternalError)?;
let (token, retlen) =
unpack_kaction_pin_token_and_len(res).ok_or(PinError::InternalError)?;
if retlen < len {
return Err(PinError::Exhausted);
} else if retlen > len {
return Err(PinError::InternalError);
}
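        // Convert the reported physical addresses into PhysInfo entries.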
        let backing: Result<Vec<_>, _> = pins
            .iter()
            .map(|p| p.physical_address().try_into().map(PhysInfo::new))
            .collect();
self.backing = Some((backing.map_err(|_| PinError::InternalError)?, token));
Ok(())
}
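    /// Return the length of the region in bytes.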
pub fn num_bytes(&self) -> usize {
self.len
}
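    /// Return the access direction of the region.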
pub fn access(&self) -> Access {
self.access
}
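    /// Pin the region's memory so that its backing physical pages cannot change, and return those
    /// physical addresses for programming a device. The pin is queued for release when the region
    /// is dropped.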
pub fn pin(&mut self) -> Result<DmaPin<'_>, PinError> {
self.setup_backing()?;
Ok(DmaPin::new(&self.backing.as_ref().unwrap().0))
}
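    /// Perform a coherence operation over the entire region.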
pub fn sync(&self, sync: SyncMode) {
crate::arch::sync(self, sync, 0, self.len);
}
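    /// Run the closure `f` with a shared reference to the region's data, performing any
    /// pre-access coherence sync implied by the access direction (unless manual coherence was
    /// requested via [`DmaOptions::UNSAFE_MANUAL_COHERENCE`]).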
pub fn with<F, R>(&self, f: F) -> R
where
F: FnOnce(&T) -> R,
{
if !self.options.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE) {
if self.access() != Access::HostToDevice {
self.sync(SyncMode::PostDeviceToCpu);
}
}
        let data = unsafe { self.get() };
        f(data)
}
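    /// Run the closure `f` with a mutable reference to the region's data, syncing before and
    /// after the closure as required by the access direction (unless manual coherence was
    /// requested via [`DmaOptions::UNSAFE_MANUAL_COHERENCE`]).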
pub fn with_mut<F, R>(&mut self, f: F) -> R
where
F: FnOnce(&mut T) -> R,
{
if !self.options.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE) {
match self.access() {
Access::HostToDevice => self.sync(SyncMode::PreCpuToDevice),
Access::DeviceToHost => self.sync(SyncMode::PostDeviceToCpu),
Access::BiDirectional => self.sync(SyncMode::FullCoherence),
}
}
let data = unsafe { self.get_mut() };
let ret = f(data);
if !self.options.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE) {
if self.access() != Access::DeviceToHost {
self.sync(SyncMode::PostCpuToDevice);
}
}
ret
}
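    /// Release any active pin on this region.
    ///
    /// # Safety
    /// The caller must ensure that no device still uses the pinned physical addresses.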
pub unsafe fn release_pin(&mut self) {
if let Some((_, token)) = self.backing {
super::object::release_pin(self.dma_object().object().id(), token);
self.backing = None;
}
}
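    /// Get a reference to the region's data.
    ///
    /// # Safety
    /// The caller must maintain coherence manually; see [`DmaRegion::with`] for a safe
    /// alternative.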
#[inline]
pub unsafe fn get(&self) -> &T {
(self.virt as *const T).as_ref().unwrap()
}
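    /// Get a mutable reference to the region's data.
    ///
    /// # Safety
    /// The caller must maintain coherence manually; see [`DmaRegion::with_mut`] for a safe
    /// alternative.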
#[inline]
pub unsafe fn get_mut(&mut self) -> &mut T {
(self.virt as *mut T).as_mut().unwrap()
}
}
impl<T: DeviceSync> DmaSliceRegion<T> {
pub(super) fn new(
nrbytes: usize,
access: Access,
options: DmaOptions,
offset: usize,
len: usize,
pool: Option<(Arc<AllocatableDmaObject>, SplitPageRange)>,
) -> Self {
Self {
region: DmaRegion::new(nrbytes, access, options, offset, pool),
len,
}
}
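    /// Initialize every element of the slice to a clone of `init`, then perform a full coherence
    /// sync.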
pub(super) fn fill(&mut self, init: T)
where
T: Clone,
{
let p = self.region.virt as *mut T;
for idx in 0..self.len {
unsafe {
p.add(idx).write_volatile(init.clone());
}
}
self.sync(0..self.len, SyncMode::FullCoherence);
}
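    /// Initialize every element of the slice with values produced by `init`, then perform a full
    /// coherence sync.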
pub(super) fn fill_with(&mut self, init: impl Fn() -> T) {
let p = self.region.virt as *mut T;
for idx in 0..self.len {
unsafe {
p.add(idx).write_volatile(init());
}
}
self.sync(0..self.len, SyncMode::FullCoherence);
}
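    /// Return the length of the underlying region in bytes.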
pub fn num_bytes(&self) -> usize {
self.region.len
}
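    /// Return the access direction of the region.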
#[inline]
pub fn access(&self) -> Access {
self.region.access()
}
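    /// Return the number of elements in the slice.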
pub fn len(&self) -> usize {
self.len
}
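    /// Pin the underlying region's memory and return its physical addresses; see
    /// [`DmaRegion::pin`].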
#[inline]
pub fn pin(&mut self) -> Result<DmaPin<'_>, PinError> {
self.region.pin()
}
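    /// Perform a coherence operation over a subrange of the region. The range is given in slice
    /// elements, not bytes.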
pub fn sync(&self, range: Range<usize>, sync: SyncMode) {
let start = range.start * core::mem::size_of::<T>();
let len = range.len() * core::mem::size_of::<T>();
crate::arch::sync(&self.region, sync, start, len);
}
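    /// Run the closure `f` with a shared reference to a subslice of the data (range in elements),
    /// performing any pre-access coherence sync implied by the access direction (unless manual
    /// coherence was requested via [`DmaOptions::UNSAFE_MANUAL_COHERENCE`]).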
pub fn with<F, R>(&self, range: Range<usize>, f: F) -> R
where
F: FnOnce(&[T]) -> R,
{
if !self
.region
.options
.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE)
{
if self.access() != Access::HostToDevice {
self.sync(range.clone(), SyncMode::PostDeviceToCpu);
}
}
        let data = &unsafe { self.get() }[range];
        f(data)
}
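    /// Run the closure `f` with a mutable reference to a subslice of the data (range in
    /// elements), syncing before and after the closure as required by the access direction
    /// (unless manual coherence was requested via [`DmaOptions::UNSAFE_MANUAL_COHERENCE`]).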
pub fn with_mut<F, R>(&mut self, range: Range<usize>, f: F) -> R
where
F: FnOnce(&mut [T]) -> R,
{
if !self
.region
.options
.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE)
{
match self.access() {
Access::HostToDevice => self.sync(range.clone(), SyncMode::PreCpuToDevice),
Access::DeviceToHost => self.sync(range.clone(), SyncMode::PostDeviceToCpu),
Access::BiDirectional => self.sync(range.clone(), SyncMode::FullCoherence),
}
}
let data = &mut unsafe { self.get_mut() }[range.clone()];
let ret = f(data);
if !self
.region
.options
.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE)
{
if self.access() != Access::DeviceToHost {
self.sync(range, SyncMode::PostCpuToDevice);
}
}
ret
}
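    /// Release any active pin on the underlying region.
    ///
    /// # Safety
    /// The caller must ensure that no device still uses the pinned physical addresses.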
#[inline]
pub unsafe fn release_pin(&mut self) {
self.region.release_pin()
}
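    /// Get a reference to the slice data.
    ///
    /// # Safety
    /// The caller must maintain coherence manually; see [`DmaSliceRegion::with`] for a safe
    /// alternative.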
#[inline]
pub unsafe fn get(&self) -> &[T] {
core::slice::from_raw_parts(self.region.virt as *const T, self.len)
}
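    /// Get a mutable reference to the slice data.
    ///
    /// # Safety
    /// The caller must maintain coherence manually; see [`DmaSliceRegion::with_mut`] for a safe
    /// alternative.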
#[inline]
pub unsafe fn get_mut(&mut self) -> &mut [T] {
core::slice::from_raw_parts_mut(self.region.virt as *mut T, self.len)
}
}
impl<T: DeviceSync> Drop for DmaRegion<T> {
fn drop(&mut self) {
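        // Queue the pin token so the object can release the pin later, and return the allocated
        // page range to the pool.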
if let Some((_, token)) = self.backing.as_ref() {
self.dma_object()
.releasable_pins
.lock()
.unwrap()
.push(*token);
}
if let Some((ado, range)) = self.pool.take() {
ado.free(range);
}
}
}