use core::{marker::PhantomData, ops::Range};
use std::sync::Arc;

use twizzler::object::RawObject;
use twizzler_abi::{
    kso::{
        pack_kaction_pin_start_and_len, unpack_kaction_pin_token_and_len, KactionCmd, KactionFlags,
        KactionGenericCmd,
    },
    object::NULLPAGE_SIZE,
    syscall::{sys_kaction, PinnedPage},
};

use super::{
    pin::{PhysInfo, PinError},
    pool::{AllocatableDmaObject, SplitPageRange},
    Access, DeviceSync, DmaObject, DmaOptions, DmaPin, SyncMode,
};
use crate::arch::DMA_PAGE_SIZE;

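/// A region of memory suitable for DMA, wrapping a single value of type `T`. The region tracks
/// its access direction and coherence options, and, once pinned, the physical pages backing it.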
pub struct DmaRegion<T: DeviceSync> {
    virt: *mut u8,
    backing: Option<(Vec<PhysInfo>, u32)>,
    len: usize,
    access: Access,
    pool: Option<(Arc<AllocatableDmaObject>, SplitPageRange)>,
    options: DmaOptions,
    offset: usize,
    _pd: PhantomData<T>,
}

impl<T: DeviceSync> core::fmt::Debug for DmaRegion<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DmaRegion")
            .field("len", &self.len)
            .finish_non_exhaustive()
    }
}

unsafe impl<T: DeviceSync> Send for DmaRegion<T> {}

#[derive(Debug)]
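/// A region of memory suitable for DMA, wrapping a slice of `T` values. Provides the same
/// pinning, synchronization, and access interface as [`DmaRegion`], extended to element ranges.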
pub struct DmaSliceRegion<T: DeviceSync> {
    region: DmaRegion<T>,
    len: usize,
}

impl<T: DeviceSync> DmaRegion<T> {
    pub(super) fn new(
        len: usize,
        access: Access,
        options: DmaOptions,
        offset: usize,
        pool: Option<(Arc<AllocatableDmaObject>, SplitPageRange)>,
    ) -> Self {
        Self {
            virt: unsafe {
                (pool
                    .as_ref()
                    .unwrap()
                    .0
                    .dma_object()
                    .object()
                    .base_mut_ptr::<u8>())
                .add(offset)
                .sub(NULLPAGE_SIZE)
            },
            len,
            access,
            options,
            backing: None,
            pool,
            offset,
            _pd: PhantomData,
        }
    }

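    /// Write an initial value into the region and synchronize it for full coherence.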
    pub(super) fn fill(&mut self, init: T) {
        let p = self.virt as *mut T;
        unsafe {
            p.write_volatile(init);
            self.sync(SyncMode::FullCoherence);
        }
    }

    fn dma_object(&self) -> &DmaObject {
        self.pool.as_ref().unwrap().0.dma_object()
    }

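    /// Calculate the number of `DMA_PAGE_SIZE` pages spanned by this region.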
    pub fn nr_pages(&self) -> usize {
        (self.len - 1) / DMA_PAGE_SIZE + 1
    }

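    /// Pin the pages backing this region via a kaction call, recording the returned physical
    /// page info and pin token. Once backing has been established, later calls are no-ops.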
    fn setup_backing(&mut self) -> Result<(), PinError> {
        if self.backing.is_some() {
            return Ok(());
        }
        let mut pins = Vec::new();
        let len = self.nr_pages();
        pins.resize(len, PinnedPage::new(0));

        let start = (self.offset / DMA_PAGE_SIZE) as u64;
        let ptr = pins.as_ptr() as u64;
        let res = sys_kaction(
            KactionCmd::Generic(KactionGenericCmd::PinPages(0)),
            Some(self.dma_object().object().id()),
            ptr,
            pack_kaction_pin_start_and_len(start, len).ok_or(PinError::InternalError)?,
            KactionFlags::empty(),
        )
        .map_err(|_| PinError::InternalError)?
        .u64()
        .ok_or(PinError::InternalError)?;

        let (token, retlen) =
            unpack_kaction_pin_token_and_len(res).ok_or(PinError::InternalError)?;

        if retlen < len {
            return Err(PinError::Exhausted);
        } else if retlen > len {
            return Err(PinError::InternalError);
        }

        let backing: Result<Vec<_>, _> = pins
            .iter()
            .map(|p| p.physical_address().try_into().map(|pa| PhysInfo::new(pa)))
            .collect();

        self.backing = Some((backing.map_err(|_| PinError::InternalError)?, token));

        Ok(())
    }

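    /// Return the length of the region in bytes.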
    pub fn num_bytes(&self) -> usize {
        self.len
    }

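    /// Return the access direction of the region.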
    pub fn access(&self) -> Access {
        self.access
    }

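    /// Pin the memory backing this region, ensuring that its physical pages stay fixed and
    /// returning their physical addresses. A minimal usage sketch (assuming a `region` value
    /// obtained from a DMA pool):
    ///
    /// ```ignore
    /// // Pin the region and hand the resulting physical page list to the device.
    /// let pin = region.pin()?;
    /// ```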
    pub fn pin(&mut self) -> Result<DmaPin<'_>, PinError> {
        self.setup_backing()?;
        Ok(DmaPin::new(&self.backing.as_ref().unwrap().0))
    }

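    /// Synchronize the entire region for coherence according to the given [`SyncMode`].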
    pub fn sync(&self, sync: SyncMode) {
        crate::arch::sync(self, sync, 0, self.len);
    }

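    /// Run a closure over a shared reference to the wrapped value. Unless
    /// `DmaOptions::UNSAFE_MANUAL_COHERENCE` is set, the region is synchronized for reading
    /// device-written data before the closure runs.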
    pub fn with<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&T) -> R,
    {
        if !self.options.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE)
            && self.access() != Access::HostToDevice
        {
            self.sync(SyncMode::PostDeviceToCpu);
        }
        let data = unsafe { self.get() };
        f(data)
    }

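    /// Run a closure over a mutable reference to the wrapped value. Unless
    /// `DmaOptions::UNSAFE_MANUAL_COHERENCE` is set, the region is synchronized according to its
    /// access direction before the closure runs and flushed back to the device afterwards.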
    pub fn with_mut<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut T) -> R,
    {
        if !self.options.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE) {
            match self.access() {
                Access::HostToDevice => self.sync(SyncMode::PreCpuToDevice),
                Access::DeviceToHost => self.sync(SyncMode::PostDeviceToCpu),
                Access::BiDirectional => self.sync(SyncMode::FullCoherence),
            }
        }
        let data = unsafe { self.get_mut() };
        let ret = f(data);
        if !self.options.contains(DmaOptions::UNSAFE_MANUAL_COHERENCE)
            && self.access() != Access::DeviceToHost
        {
            self.sync(SyncMode::PostCpuToDevice);
        }
        ret
    }

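    /// Release the pin created for this region, if any.
    ///
    /// # Safety
    /// The caller must ensure that the device no longer uses the physical addresses that were
    /// handed out for this region.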
    pub unsafe fn release_pin(&mut self) {
        if let Some((_, token)) = self.backing {
            super::object::release_pin(self.dma_object().object().id(), token);
            self.backing = None;
        }
    }

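    /// Get a shared reference to the wrapped value without synchronizing the region.
    ///
    /// # Safety
    /// The caller must manage coherence manually (e.g. via [`Self::sync`]).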
    #[inline]
    pub unsafe fn get(&self) -> &T {
        (self.virt as *const T).as_ref().unwrap()
    }

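    /// Get a mutable reference to the wrapped value without synchronizing the region.
    ///
    /// # Safety
    /// The caller must manage coherence manually (e.g. via [`Self::sync`]).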
    #[inline]
    pub unsafe fn get_mut(&mut self) -> &mut T {
        (self.virt as *mut T).as_mut().unwrap()
    }
}

impl<T: DeviceSync> DmaSliceRegion<T> {
    pub(super) fn new(
        nrbytes: usize,
        access: Access,
        options: DmaOptions,
        offset: usize,
        len: usize,
        pool: Option<(Arc<AllocatableDmaObject>, SplitPageRange)>,
    ) -> Self {
        Self {
            region: DmaRegion::new(nrbytes, access, options, offset, pool),
            len,
        }
    }

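    /// Write a clone of `init` into every element of the slice and synchronize the region.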
    pub(super) fn fill(&mut self, init: T)
    where
        T: Clone,
    {
        let p = self.region.virt as *mut T;
        for idx in 0..self.len {
            unsafe {
                p.add(idx).write_volatile(init.clone());
            }
        }
        self.sync(0..self.len, SyncMode::FullCoherence);
    }

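    /// Fill every element of the slice with values produced by `init` and synchronize the region.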
    pub(super) fn fill_with(&mut self, init: impl Fn() -> T) {
        let p = self.region.virt as *mut T;
        for idx in 0..self.len {
            unsafe {
                p.add(idx).write_volatile(init());
            }
        }
        self.sync(0..self.len, SyncMode::FullCoherence);
    }

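    /// Return the length of the region in bytes.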
    pub fn num_bytes(&self) -> usize {
        self.region.len
    }

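    /// Return the access direction of the region.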
    #[inline]
    pub fn access(&self) -> Access {
        self.region.access()
    }

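    /// Return the number of elements in the slice.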
    pub fn len(&self) -> usize {
        self.len
    }

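    /// Pin the memory backing this region, returning the physical pages that back it. See
    /// [`DmaRegion::pin`].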
    #[inline]
    pub fn pin(&mut self) -> Result<DmaPin<'_>, PinError> {
        self.region.pin()
    }

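    /// Synchronize a subslice of the region for coherence. `range` is given in units of elements
    /// and is converted to a byte range internally.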
    pub fn sync(&self, range: Range<usize>, sync: SyncMode) {
        let start = range.start * core::mem::size_of::<T>();
        let len = range.len() * core::mem::size_of::<T>();
        crate::arch::sync(&self.region, sync, start, len);
    }

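    /// Run a closure over a subslice of the region, selected by an element range. Unless
    /// `DmaOptions::UNSAFE_MANUAL_COHERENCE` is set, that range is synchronized for reading
    /// device-written data before the closure runs.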
    pub fn with<F, R>(&self, range: Range<usize>, f: F) -> R
    where
        F: FnOnce(&[T]) -> R,
    {
        if !self
            .region
            .options
            .contains(DmaOptions::UNSAFE_MANUAL_COHERENCE)
            && self.access() != Access::HostToDevice
        {
            self.sync(range.clone(), SyncMode::PostDeviceToCpu);
        }
        let data = &unsafe { self.get() }[range];
        f(data)
    }

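    /// Run a closure over a mutable subslice of the region, selected by an element range. Unless
    /// `DmaOptions::UNSAFE_MANUAL_COHERENCE` is set, that range is synchronized according to the
    /// region's access direction before the closure runs and flushed back to the device
    /// afterwards. A minimal sketch (assuming `buf` is a `DmaSliceRegion<u8>` from a DMA pool):
    ///
    /// ```ignore
    /// // Zero the first 16 elements; coherence is handled around the closure.
    /// buf.with_mut(0..16, |slice| slice.fill(0));
    /// ```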
    pub fn with_mut<F, R>(&mut self, range: Range<usize>, f: F) -> R
    where
        F: FnOnce(&mut [T]) -> R,
    {
        if !self
            .region
            .options
            .contains(DmaOptions::UNSAFE_MANUAL_COHERENCE)
        {
            match self.access() {
                Access::HostToDevice => self.sync(range.clone(), SyncMode::PreCpuToDevice),
                Access::DeviceToHost => self.sync(range.clone(), SyncMode::PostDeviceToCpu),
                Access::BiDirectional => self.sync(range.clone(), SyncMode::FullCoherence),
            }
        }
        let data = &mut unsafe { self.get_mut() }[range.clone()];
        let ret = f(data);
        if !self
            .region
            .options
            .contains(DmaOptions::UNSAFE_MANUAL_COHERENCE)
            && self.access() != Access::DeviceToHost
        {
            self.sync(range, SyncMode::PostCpuToDevice);
        }
        ret
    }

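    /// Release the pin created for this region, if any.
    ///
    /// # Safety
    /// The caller must ensure that the device no longer uses the physical addresses that were
    /// handed out for this region.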
    #[inline]
    pub unsafe fn release_pin(&mut self) {
        self.region.release_pin()
    }

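    /// Get a shared slice over the region's elements without synchronizing.
    ///
    /// # Safety
    /// The caller must manage coherence manually (e.g. via [`Self::sync`]).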
    #[inline]
    pub unsafe fn get(&self) -> &[T] {
        core::slice::from_raw_parts(self.region.virt as *const T, self.len)
    }

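    /// Get a mutable slice over the region's elements without synchronizing.
    ///
    /// # Safety
    /// The caller must manage coherence manually (e.g. via [`Self::sync`]).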
    #[inline]
    pub unsafe fn get_mut(&mut self) -> &mut [T] {
        core::slice::from_raw_parts_mut(self.region.virt as *mut T, self.len)
    }
}

impl<T: DeviceSync> Drop for DmaRegion<T> {
    fn drop(&mut self) {
        if let Some((_, token)) = self.backing.as_ref() {
            self.dma_object()
                .releasable_pins
                .lock()
                .unwrap()
                .push(*token);
        }

        if let Some((ado, range)) = self.pool.take() {
            ado.free(range);
        }
    }
}