twizzler_driver/dma/pool.rs

use std::sync::{Arc, Mutex};

use twizzler::{
    marker::{BaseType, Invariant},
    object::ObjectBuilder,
};
use twizzler_abi::object::{MAX_SIZE, NULLPAGE_SIZE};

use super::{Access, DeviceSync, DmaObject, DmaOptions, DmaRegion, DmaSliceRegion, DMA_PAGE_SIZE};

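/// A contiguous run of DMA pages within a pool-backed object, identified by a starting page
/// index and a length in pages.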
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub(super) struct SplitPageRange {
    start: usize,
    len: usize,
}

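/// The result of splitting a [SplitPageRange]: either the original range (if no split was
/// needed) or the two pieces.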
pub(super) enum Split {
    Single(SplitPageRange),
    Multiple(SplitPageRange, SplitPageRange),
}

impl SplitPageRange {
    fn new(start: usize, len: usize) -> Self {
        Self { start, len }
    }

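    /// Split the range into a prefix of `newlen` pages and the remaining pages. If `newlen`
    /// is zero or covers the entire range, the range is returned unchanged as [Split::Single].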
    fn split(self, newlen: usize) -> Split {
        let start = self.start;
        let len = self.len;
        if newlen == 0 || newlen == len {
            return Split::Single(Self { start, len });
        }
        Split::Multiple(
            Self { start, len: newlen },
            Self {
                start: start + newlen,
                len: len - newlen,
            },
        )
    }

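    /// Merge this range with an adjacent one into a single contiguous range. Panics if the
    /// two ranges are not adjacent.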
    fn merge(self, other: Self) -> Self {
        let (first, second) = if self.start < other.start {
            (self, other)
        } else {
            (other, self)
        };
        assert!(first.adjacent_before(&second));

        Self {
            start: first.start,
            len: first.len + second.len,
        }
    }

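    /// Returns true if this range ends exactly where `other` begins.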
    fn adjacent_before(&self, other: &Self) -> bool {
        self.start < other.start && self.start + self.len == other.start
    }

    fn len(&self) -> usize {
        self.len
    }

    #[cfg(test)]
    fn start(&self) -> usize {
        self.start
    }

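    /// Byte offset of this range within the object (page index times page size).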
    fn offset(&self) -> usize {
        self.start * DMA_PAGE_SIZE
    }
}

#[cfg(test)]
pub mod tests_split_page_range {
    use super::SplitPageRange;
    use crate::dma::pool::compact_range_list;

    #[test]
    fn spr_split_multiple() {
        let r = SplitPageRange::new(2, 7);
        let split = r.split(4);
        if let super::Split::Multiple(a, b) = split {
            assert_eq!(a.len(), 4);
            assert_eq!(a.start(), 2);
            assert_eq!(b.len(), 3);
            assert_eq!(b.start(), 6);
        } else {
            panic!("expected Split::Multiple");
        }
    }

    #[test]
    fn spr_split_single1() {
        let r = SplitPageRange::new(2, 7);
        let split = r.split(7);
        if let super::Split::Single(r) = split {
            assert_eq!(r.len(), 7);
            assert_eq!(r.start(), 2);
        } else {
            panic!("expected Split::Single");
        }
    }

    #[test]
    fn spr_split_single2() {
        let r = SplitPageRange::new(2, 7);
        let split = r.split(0);
        if let super::Split::Single(r) = split {
            assert_eq!(r.len(), 7);
            assert_eq!(r.start(), 2);
        } else {
            panic!("expected Split::Single");
        }
    }

    #[test]
    fn spr_merge() {
        let a = SplitPageRange::new(2, 4);
        let b = SplitPageRange::new(6, 3);
        let r = a.merge(b);
        assert_eq!(r.start(), 2);
        assert_eq!(r.len(), 7);
    }

    #[test]
    fn spr_adj() {
        let a = SplitPageRange::new(2, 4);
        let b = SplitPageRange::new(1, 1);
        let c = SplitPageRange::new(6, 4);

        assert!(!a.adjacent_before(&b));
        assert!(b.adjacent_before(&a));
        assert!(!a.adjacent_before(&a));
        assert!(a.adjacent_before(&c));
    }

    #[test]
    fn spr_merge_alg() {
        let a = SplitPageRange::new(2, 4);
        let b = SplitPageRange::new(0, 1);
        let c = SplitPageRange::new(6, 4);
        let x = SplitPageRange::new(2, 8);
        let mut list = vec![a.clone(), b.clone(), c.clone()];
        // Sanity check: `windows(2)` on a single-element list yields nothing, which is the
        // edge case `compact_range_list` relies on to terminate.
        let single_list = vec![a.clone()];
        let slw: Vec<_> = single_list.windows(2).collect();
        assert!(slw.is_empty());

        compact_range_list(&mut list);

        // `a` (pages 2..6) and `c` (pages 6..10) merge into `x`; `b` is not adjacent.
        assert_eq!(list, vec![b, x]);
    }
}

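/// A DMA object paired with a freelist of page ranges still available for allocation.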
pub(super) struct AllocatableDmaObject {
    dma: DmaObject,
    freelist: Mutex<Vec<SplitPageRange>>,
}

/// A pool for allocating DMA regions that all share a common access type and DMA options.
pub struct DmaPool {
    opts: DmaOptions,
    spec: ObjectBuilder<()>,
    access: Access,
    objects: Mutex<Vec<Arc<AllocatableDmaObject>>>,
}

/// Possible errors that can arise from a DMA pool allocation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum AllocationError {
    /// The requested region size was too large.
    TooBig,
    /// An internal error occurred.
    InternalError,
}

/// Zero-sized base type for the pool's underlying objects, which carry no meaningful base data.
#[repr(C)]
struct EmptyBase;

unsafe impl Invariant for EmptyBase {}
impl BaseType for EmptyBase {}

// Merge adjacent ranges by sorting, comparing neighboring pairs, and merging any that are
// adjacent. Repeat until no more merges are possible.
fn compact_range_list(list: &mut Vec<SplitPageRange>) {
    list.sort();
    loop {
        // Collect the indices of all neighboring pairs that can be merged.
        let pairs: Vec<_> = list
            .windows(2)
            .enumerate()
            .filter_map(|(idx, ranges)| {
                if ranges[0].adjacent_before(&ranges[1]) {
                    Some(idx)
                } else {
                    None
                }
            })
            .collect();

        if pairs.is_empty() {
            break;
        }

        // Merge from the end of the list backwards so earlier indices stay valid.
        for pair in pairs.iter().rev() {
            // Remove the second range first; the first range's index is unaffected.
            let second = list.remove(pair + 1);
            let new = list[*pair].clone().merge(second);
            list[*pair] = new;
        }
    }
}

impl AllocatableDmaObject {
    pub(super) fn dma_object(&self) -> &DmaObject {
        &self.dma
    }

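    /// Return a page range to the freelist, then compact the freelist so adjacent free
    /// ranges merge back together.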
    pub(super) fn free(&self, range: SplitPageRange) {
        let mut freelist = self.freelist.lock().unwrap();
        freelist.push(range);

        compact_range_list(&mut freelist);
        // TODO: consider that, if the entire object gets freed, we could delete the object.
    }

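    /// Allocate `len` bytes (rounded up to whole pages) from this object using a first-fit
    /// search of the freelist. Returns None if no free range is large enough.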
    fn allocate(&self, len: usize) -> Option<SplitPageRange> {
        let mut freelist = self.freelist.lock().unwrap();
        // Round up to a whole number of pages; a zero-length request still takes one page
        // (the previous `(len - 1) / DMA_PAGE_SIZE + 1` underflowed for len == 0).
        let nr_pages = len.div_ceil(DMA_PAGE_SIZE).max(1);
        let index = freelist.iter().position(|range| range.len() >= nr_pages)?;

        let range = freelist.remove(index);
        Some(match range.split(nr_pages) {
            Split::Single(r) => r,
            Split::Multiple(alloc, extra) => {
                freelist.push(extra);
                alloc
            }
        })
    }

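    /// Create a new allocatable DMA object from the given builder spec. The freelist starts
    /// as a single range covering all allocatable pages (the reserved null and meta pages
    /// are excluded).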
    fn new(spec: ObjectBuilder<()>) -> Result<AllocatableDmaObject, AllocationError> {
        Ok(AllocatableDmaObject {
            // TODO: automatic object deletion.
            dma: DmaObject::new::<EmptyBase>(
                spec.cast()
                    .build(EmptyBase)
                    .map_err(|_| AllocationError::InternalError)?,
            ),
            freelist: Mutex::new(vec![SplitPageRange::new(
                1,
                (MAX_SIZE - NULLPAGE_SIZE * 2) / DMA_PAGE_SIZE,
            )]),
        })
    }
}

impl DmaPool {
    /// Create a new DmaPool with the given access direction and DMA options, where each
    /// underlying Twizzler object is created using the provided [ObjectBuilder]. If a
    /// default (volatile) spec is acceptable, use the [crate::dma::DmaPool::default_spec]
    /// function.
    pub fn new(spec: ObjectBuilder<()>, access: Access, opts: DmaOptions) -> Self {
        Self {
            opts,
            spec,
            access,
            objects: Mutex::new(vec![]),
        }
    }

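    /// Return the default [ObjectBuilder] spec for creating the pool's underlying objects
    /// (a volatile object with default settings).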
    pub fn default_spec() -> ObjectBuilder<()> {
        ObjectBuilder::default()
    }

    fn new_object(&self) -> Result<Arc<AllocatableDmaObject>, AllocationError> {
        let obj = Arc::new(AllocatableDmaObject::new(self.spec.clone())?);
        Ok(obj)
    }

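    /// Find space for `len` bytes: try each existing pool object in turn, and if none has
    /// a large enough free range, create a new object and retry.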
    fn do_allocate(
        &self,
        len: usize,
    ) -> Result<(Arc<AllocatableDmaObject>, SplitPageRange), AllocationError> {
        if len > MAX_SIZE - NULLPAGE_SIZE * 2 {
            return Err(AllocationError::TooBig);
        }
        let mut objects = self.objects.lock().unwrap();
        for obj in &*objects {
            if let Some(pagerange) = obj.allocate(len) {
                return Ok((obj.clone(), pagerange));
            }
        }
        // No existing object had room; create a new one and retry the allocation.
        let obj = self.new_object()?;
        objects.push(obj);
        drop(objects);
        self.do_allocate(len)
    }

    /// Allocate a new [DmaRegion] from the pool. The region will be initialized with the
    /// provided initial value.
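    ///
    /// # Example
    ///
    /// A minimal usage sketch (the `twizzler_driver::dma` import path is an assumption here):
    ///
    /// ```ignore
    /// use twizzler_driver::dma::{Access, DmaOptions, DmaPool};
    ///
    /// let pool = DmaPool::new(
    ///     DmaPool::default_spec(),
    ///     Access::BiDirectional,
    ///     DmaOptions::empty(),
    /// );
    /// // Allocate a single u32 DMA region, initialized to zero.
    /// let region = pool.allocate(0u32).unwrap();
    /// ```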
    pub fn allocate<'a, T: DeviceSync>(&'a self, init: T) -> Result<DmaRegion<T>, AllocationError> {
        let len = core::mem::size_of::<T>();
        let (ado, range) = self.do_allocate(len)?;
        let mut reg = DmaRegion::new(
            len,
            self.access,
            self.opts,
            range.offset(),
            Some((ado.clone(), range)),
        );
        reg.fill(init);
        Ok(reg)
    }

    /// Allocate a new [DmaSliceRegion] from the pool. Each entry in the region's slice will
    /// be initialized with the provided initial value.
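    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `pool` was created as in the [DmaPool::allocate] example:
    ///
    /// ```ignore
    /// // Allocate a 128-entry slice of zeroed bytes.
    /// let buf = pool.allocate_array(128, 0u8).unwrap();
    /// ```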
    pub fn allocate_array<'a, T: DeviceSync + Clone>(
        &'a self,
        count: usize,
        init: T,
    ) -> Result<DmaSliceRegion<T>, AllocationError> {
        let len = core::mem::size_of::<T>() * count;
        let (ado, range) = self.do_allocate(len)?;
        let mut reg = DmaSliceRegion::new(
            len,
            self.access,
            self.opts,
            range.offset(),
            count,
            Some((ado.clone(), range)),
        );
        reg.fill(init);
        Ok(reg)
    }

    /// Allocate a new [DmaSliceRegion] from the pool. Each entry in the region's slice will
    /// be initialized by running the provided closure.
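    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `pool` was created as in the [DmaPool::allocate] example:
    ///
    /// ```ignore
    /// // Allocate 16 entries, each initialized by running the closure.
    /// let table = pool.allocate_array_with(16, || 0u64).unwrap();
    /// ```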
    pub fn allocate_array_with<'a, T: DeviceSync>(
        &'a self,
        count: usize,
        init: impl Fn() -> T,
    ) -> Result<DmaSliceRegion<T>, AllocationError> {
        let len = core::mem::size_of::<T>() * count;
        let (ado, range) = self.do_allocate(len)?;
        let mut reg = DmaSliceRegion::new(
            len,
            self.access,
            self.opts,
            range.offset(),
            count,
            Some((ado.clone(), range)),
        );
        reg.fill_with(init);
        Ok(reg)
    }
}

#[cfg(test)]
mod tests {
    use super::DmaPool;
    use crate::dma::{Access, DmaOptions};

    #[test]
    fn allocate() {
        let pool = DmaPool::new(
            DmaPool::default_spec(),
            Access::BiDirectional,
            DmaOptions::empty(),
        );

        let _res = pool.allocate(u32::MAX).unwrap();
    }
}