// nvme/queue/mod.rs

1use std::mem::{size_of, MaybeUninit};
2
3use crate::ds::queue::comentry::CommonCompletion;
4
/// A fixed-size NVMe-style submission ring laid over a caller-provided
/// memory region. Entries are `stride` bytes apart. The ring is empty when
/// `head == tail` and full when advancing `tail` would collide with `head`,
/// so one slot is always left unused to distinguish the two states.
#[derive(Debug)]
pub struct SubmissionQueue {
    // Index of the next slot the producer will write.
    tail: u16,
    // Index of the oldest entry not yet consumed; advanced via `update_head`.
    head: u16,
    // Total number of entry slots in the ring.
    len: u16,
    // Distance in bytes between consecutive entries.
    stride: usize,
    // Base of the backing region; must outlive the queue (not enforced here).
    memory: *mut u8,
}

/// Errors returned when constructing a queue over a memory region.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq)]
pub enum CreateQueueError {
    /// The region cannot hold `nr_entries` entries of the requested stride.
    MemoryRegionTooSmall,
    /// The stride is too small to hold a single entry.
    StrideTooSmall,
}

impl SubmissionQueue {
    /// Creates a submission queue over `memory` with `nr_entries` slots,
    /// each `stride` bytes apart.
    ///
    /// # Errors
    /// - `MemoryRegionTooSmall` if `nr_entries * stride` exceeds (or would
    ///   overflow past) `memory.len()`.
    /// - `StrideTooSmall` if `stride` is zero.
    ///
    /// # Panics
    /// Panics if `nr_entries` is zero: a zero-length ring would divide by
    /// zero as soon as an index wraps.
    pub fn new(
        memory: &mut [u8],
        nr_entries: u16,
        stride: usize,
    ) -> Result<Self, CreateQueueError> {
        assert!(nr_entries > 0, "submission queue needs at least one entry");
        // checked_mul: a huge stride must not overflow into a false "fits".
        let required = (nr_entries as usize)
            .checked_mul(stride)
            .ok_or(CreateQueueError::MemoryRegionTooSmall)?;
        if required > memory.len() {
            return Err(CreateQueueError::MemoryRegionTooSmall);
        }
        if stride < 1 {
            return Err(CreateQueueError::StrideTooSmall);
        }
        Ok(Self {
            head: 0,
            tail: 0,
            len: nr_entries,
            stride,
            memory: memory.as_mut_ptr(),
        })
    }

    /// Returns true when the ring has no free slot (one slot stays reserved
    /// so that full and empty are distinguishable).
    pub fn is_full(&self) -> bool {
        self.head == (self.tail + 1) % self.len
    }

    /// Returns true when no entries are waiting to be consumed.
    pub fn is_empty(&self) -> bool {
        self.tail == self.head
    }

    /// Copies `data` into the slot at the current tail and advances the
    /// tail. Bytes of the slot beyond `data.len()` are zeroed so stale bytes
    /// from a previously submitted entry never leak into the new one.
    ///
    /// Returns the new tail index (the doorbell value), or `None` if the
    /// queue is full.
    ///
    /// # Panics
    /// Panics if `data` is longer than the queue stride.
    pub fn submit_bytes(&mut self, data: &[u8]) -> Option<u16> {
        if data.len() > self.stride {
            panic!("tried to submit data too big for stride");
        }
        if self.is_full() {
            return None;
        }
        let tail = self.tail;
        self.tail = (self.tail + 1) % self.len;
        let ptr = self.get_entry_pointer(tail);
        // SAFETY: `new` verified the region holds `len * stride` bytes and
        // `tail < len`, so this `stride`-byte slot lies fully inside it.
        let slot = unsafe { core::slice::from_raw_parts_mut(ptr, self.stride) };
        slot[..data.len()].copy_from_slice(data);
        // Zero the remainder so the consumer never sees a prior entry's bytes.
        slot[data.len()..].fill(0);
        Some(self.tail)
    }

    /// Submits a plain-old-data value by viewing it as raw bytes. `T` must
    /// be no larger than the stride (enforced by `submit_bytes`).
    pub fn submit<T: Copy>(&mut self, data: &T) -> Option<u16> {
        let bytes = data as *const T as *const u8;
        let len = size_of::<T>();
        // SAFETY: `data` is a valid `T`, readable for `size_of::<T>()` bytes;
        // `T: Copy` guarantees plain data with no drop glue. NOTE(review):
        // if `T` has padding, those bytes are uninitialized — verify callers
        // only submit fully-initialized, repr-stable command types.
        self.submit_bytes(unsafe { core::slice::from_raw_parts(bytes, len) })
    }

    /// Computes the address of entry `ent` inside the backing region.
    fn get_entry_pointer(&mut self, ent: u16) -> *mut u8 {
        // SAFETY: callers only pass `ent < len`, and `new` verified the
        // region holds `len * stride` bytes.
        unsafe { self.memory.add(ent as usize * self.stride) }
    }

    /// Records consumer progress (e.g. taken from a completion entry).
    ///
    /// # Panics
    /// Panics if `head` is not a valid entry index.
    pub fn update_head(&mut self, head: u16) {
        if head >= self.len {
            panic!("tried to set head to {} (len = {})", head, self.len);
        }
        self.head = head;
    }

    /// Number of entry slots in the ring.
    pub fn len(&self) -> u16 {
        self.len
    }

    /// Byte distance between consecutive entries.
    pub fn stride(&self) -> usize {
        self.stride
    }
}
88
89pub struct CompletionQueue {
90    head: u16,
91    len: u16,
92    phase: bool,
93    stride: usize,
94    memory: *const u8,
95}
96
97impl CompletionQueue {
98    pub fn new(memory: &[u8], nr_entries: u16, stride: usize) -> Result<Self, CreateQueueError> {
99        if nr_entries as usize * stride > memory.len() {
100            return Err(CreateQueueError::MemoryRegionTooSmall);
101        }
102        if stride < core::mem::size_of::<CommonCompletion>() {
103            return Err(CreateQueueError::StrideTooSmall);
104        }
105        Ok(Self {
106            head: 0,
107            len: nr_entries,
108            phase: false,
109            stride,
110            memory: memory.as_ptr(),
111        })
112    }
113
114    pub fn stride(&self) -> usize {
115        self.stride
116    }
117
118    fn get_entry_pointer(&self, ent: u16) -> *const u8 {
119        unsafe { self.memory.add(ent as usize * self.stride) }
120    }
121
122    fn get_entry_compl_pointer(&self, ent: u16) -> *const CommonCompletion {
123        let ptr = self.get_entry_pointer(ent);
124        ptr as *const CommonCompletion
125    }
126
127    fn get_entry_slice(&self, ent: u16) -> &[u8] {
128        let ptr = self.get_entry_pointer(ent);
129        unsafe { core::slice::from_raw_parts(ptr, self.stride) }
130    }
131
132    pub fn ready(&self) -> bool {
133        let entry = unsafe {
134            self.get_entry_compl_pointer(self.head)
135                .as_ref()
136                .unwrap_unchecked()
137        };
138        entry.phase() != self.phase
139    }
140
141    pub fn get_completion_bytes(&mut self, output: &mut [u8]) -> Option<u16> {
142        if output.len() != self.stride {
143            panic!("completion output bytes too small");
144        }
145        // TODO: volatile?
146        let head = self.head;
147        let entry = unsafe {
148            self.get_entry_compl_pointer(head)
149                .as_ref()
150                .unwrap_unchecked()
151        };
152        if entry.phase() == self.phase {
153            return None;
154        }
155
156        output.copy_from_slice(self.get_entry_slice(head));
157
158        self.head = (head + 1) % self.len;
159        if self.head == 0 {
160            self.phase = !self.phase;
161        }
162        Some(self.head)
163    }
164
165    pub fn get_completion<T: Copy>(&mut self) -> Option<(u16, T)> {
166        let mut data = MaybeUninit::uninit();
167        let bytes = data.as_mut_ptr() as *mut u8;
168        let len = size_of::<T>();
169        let head =
170            self.get_completion_bytes(unsafe { core::slice::from_raw_parts_mut(bytes, len) })?;
171
172        Some((head, unsafe { data.assume_init() }))
173    }
174}