twizzler_driver/bus/
pcie.rs

1//! PCIe-specific functionality.
2
3use std::ptr::NonNull;
4
5pub use twizzler_abi::device::bus::pcie::*;
6use twizzler_abi::{
7    device::InterruptVector,
8    kso::{KactionCmd, KactionFlags},
9};
10use twizzler_rt_abi::{error::TwzError, Result};
11use volatile::{
12    access::{Access, ReadWrite, Readable},
13    map_field, VolatilePtr, VolatileRef,
14};
15
16use crate::device::{Device, MmioObject};
17
/// Iterator over the PCIe capability linked list in a device's
/// configuration space.
///
/// Produced by [`Device::pcie_capabilities`]; yields one [`PcieCapability`]
/// per list entry until the `next` pointer is zero.
pub struct PcieCapabilityIterator<'a> {
    // Held only to tie this iterator's lifetime to the owning device.
    _dev: &'a Device,
    // MMIO mapping of the device's configuration space.
    cfg: &'a MmioObject,
    // Byte offset of the next capability header; 0 terminates iteration.
    off: usize,
}
23
/// Common two-byte header shared by every PCIe capability structure.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
#[repr(C, packed(4))]
pub struct PcieCapabilityHeader {
    /// Capability ID (this file matches 0x05 = MSI, 0x11 = MSI-X, 0x09 = vendor).
    pub id: u8,
    /// Config-space byte offset of the next capability; 0 ends the list.
    pub next: u8,
}
31
/// MSI capability structure (capability ID 0x05), with 64-bit address and
/// per-vector mask/pending registers.
#[derive(Debug, Copy, Clone)]
#[repr(C, packed(4))]
pub struct MsiCapability {
    /// Common capability header.
    pub header: PcieCapabilityHeader,
    /// Message Control register (written to 0 in this file to disable MSI).
    pub msg_ctrl: u16,
    /// Lower 32 bits of the message address.
    pub msg_addr_low: u32,
    /// Upper 32 bits of the message address.
    pub msg_addr_hi: u32,
    /// Message data delivered on interrupt.
    pub msg_data: u16,
    /// Reserved padding.
    pub resv: u16,
    /// Per-vector mask bits.
    pub mask: u32,
    /// Per-vector pending bits.
    pub pending: u32,
}
44
/// MSI-X capability structure (capability ID 0x11).
#[derive(Debug, Copy, Clone)]
#[repr(C, packed(4))]
pub struct MsixCapability {
    /// Common capability header.
    pub header: PcieCapabilityHeader,
    /// Message Control: table size in the low 11 bits, enable bit 15.
    pub msg_ctrl: u16,
    /// BAR indicator (low 3 bits) and byte offset (remaining bits) of the
    /// MSI-X table.
    pub table_offset_and_bir: u32,
    /// BAR indicator and byte offset of the pending-bit array.
    pub pending_offset_and_bir: u32,
}
53
54impl MsixCapability {
55    fn get_table_info<'a, A: Readable + Access>(msix: VolatilePtr<'a, Self, A>) -> (u8, usize) {
56        let info = map_field!(msix.table_offset_and_bir).read();
57        ((info & 0x7) as u8, (info & !0x7) as usize)
58    }
59
60    fn table_len<'a, A: Readable + Access>(msix: VolatilePtr<'a, Self, A>) -> usize {
61        (map_field!(msix.msg_ctrl).read() & 0x7ff) as usize
62    }
63}
64
/// A single 16-byte entry of a device's MSI-X table.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed(8))]
pub struct MsixTableEntry {
    /// Lower 32 bits of the message address.
    msg_addr_lo: u32,
    /// Upper 32 bits of the message address.
    msg_addr_hi: u32,
    /// Message data delivered on interrupt.
    msg_data: u32,
    /// Vector control; written to 0 in this file to unmask the entry.
    vec_ctrl: u32,
}
73
/// One parsed entry from a device's PCIe capability list.
#[derive(Debug)]
pub enum PcieCapability<'a> {
    /// A capability whose ID this driver does not interpret.
    Unknown(u8),
    /// MSI capability (ID 0x05).
    Msi(VolatileRef<'a, MsiCapability>),
    /// MSI-X capability (ID 0x11).
    MsiX(VolatileRef<'a, MsixCapability>),
    VendorSpecific(usize), /* Offset to the capability, as vendor specific capabilities do not
                            * have a consistent definition. */
}
82
impl<'a> Iterator for PcieCapabilityIterator<'a> {
    type Item = PcieCapability<'a>;

    /// Walk one step of the capability linked list in config space.
    fn next(&mut self) -> Option<Self::Item> {
        // An offset of 0 terminates the capability list.
        if self.off == 0 {
            return None;
        }
        // SAFETY: `self.off` came from the device's capability pointer or a
        // capability's `next` field, so it addresses a capability header
        // inside the config-space MMIO object. NOTE(review): offsets are
        // device-provided — presumably the mapping covers all of config
        // space; confirm `get_mmio_offset` bounds-checks.
        unsafe {
            let cap = self.cfg.get_mmio_offset::<PcieCapabilityHeader>(self.off);
            let cap = cap.as_ptr();
            // Dispatch on the capability ID: 0x05 = MSI, 0x11 = MSI-X,
            // 0x09 = vendor specific; anything else is reported as Unknown.
            let ret = match map_field!(cap.id).read() {
                5 => PcieCapability::Msi(self.cfg.get_mmio_offset_mut::<MsiCapability>(self.off)),
                0x11 => {
                    PcieCapability::MsiX(self.cfg.get_mmio_offset_mut::<MsixCapability>(self.off))
                }
                9 => PcieCapability::VendorSpecific(self.off),
                x => PcieCapability::Unknown(x),
            };
            // The low 2 bits of the next pointer are reserved; mask them off.
            self.off = (map_field!(cap.next).read() & 0xfc) as usize;
            Some(ret)
        }
    }
}
106
// TODO: allow for dest-ID and other options, and propagate all this stuff through the API.
108fn calc_msg_info(vec: InterruptVector, level: bool) -> (u64, u32) {
109    let addr = (0xfee << 20) | (0 << 12);
110    let data: u32 = vec.into();
111    let data = data | if level { 1 << 15 } else { 0 };
112    (addr, data)
113}
114
115impl Device {
    /// Return an iterator over this device's PCIe capability list, or
    /// `None` if the device does not advertise one.
    ///
    /// `mm` must be the MMIO object mapping the device's configuration
    /// space; offset 0 is interpreted as a [`PcieDeviceHeader`].
    pub fn pcie_capabilities<'a>(
        &'a self,
        mm: &'a MmioObject,
    ) -> Option<PcieCapabilityIterator<'a>> {
        // SAFETY: offset 0 of the config-space mapping holds the device
        // header. NOTE(review): assumes `mm` really is the config-space
        // object — confirm callers.
        let cfg = unsafe { mm.get_mmio_offset::<PcieDeviceHeader>(0) };
        let cfg = cfg.as_ptr();
        // The low 2 bits of the capability pointer are reserved; mask them.
        let ptr = map_field!(cfg.cap_ptr).read() & 0xfc;
        let hdr = map_field!(cfg.fnheader);
        // Status register bit 4 indicates a capability list is present.
        if map_field!(hdr.status).read() & (1 << 4) == 0 {
            return None;
        }
        Some(PcieCapabilityIterator {
            _dev: self,
            cfg: mm,
            off: ptr as usize,
        })
    }
133
134    pub fn find_mmio_bar(&self, bar: usize) -> Option<MmioObject> {
135        let mut idx = 0;
136        while let Some(mm) = self.get_mmio(idx) {
137            if mm.get_info().info == bar as u64 {
138                return Some(mm);
139            }
140            idx += 1;
141        }
142        None
143    }
144
    /// Program entry `inum` of this device's MSI-X table to deliver
    /// interrupt vector `vec`, enabling MSI-X in the process.
    ///
    /// Returns `inum` on success, or `NOT_SUPPORTED` if no MMIO object
    /// matches the BAR that holds the MSI-X table.
    fn allocate_msix_interrupt(
        &self,
        msix: volatile::VolatilePtr<'_, MsixCapability, ReadWrite>,
        vec: InterruptVector,
        inum: usize,
    ) -> Result<u32> {
        let (bar, offset) = MsixCapability::get_table_info(msix);
        // Write Message Control with only bit 15 (MSI-X Enable) set.
        map_field!(msix.msg_ctrl).write(1 << 15);
        let mmio = self
            .find_mmio_bar(bar.into())
            .ok_or(TwzError::NOT_SUPPORTED)?;
        // SAFETY: `offset` and the length both come from the device's MSI-X
        // capability, which describes the table within this BAR's mapping.
        let table = unsafe {
            let start = mmio
                .get_mmio_offset::<MsixTableEntry>(offset)
                .as_ptr()
                .as_raw_ptr()
                .as_ptr();
            let len = MsixCapability::table_len(msix);
            VolatilePtr::new(NonNull::from(core::slice::from_raw_parts_mut(start, len)))
        };
        // Edge-triggered delivery (level = false).
        let (msg_addr, msg_data) = calc_msg_info(vec, false);
        // NOTE(review): `index` panics if `inum` exceeds the table length —
        // presumably callers pass a validated interrupt number; confirm.
        let entry = table.index(inum);
        map_field!(entry.msg_addr_lo).write(msg_addr as u32);
        map_field!(entry.msg_addr_hi).write((msg_addr >> 32) as u32);
        map_field!(entry.msg_data).write(msg_data);
        // Clear Vector Control to unmask the entry.
        map_field!(entry.vec_ctrl).write(0);
        Ok(inum as u32)
    }
173
    /// Program this device's MSI capability to deliver `vec`.
    ///
    /// Not yet implemented: always panics via `todo!()`. The MSI-X path in
    /// `allocate_pcie_interrupt` is tried first, so this only runs for
    /// devices lacking MSI-X.
    fn allocate_msi_interrupt(
        &self,
        _msi: &VolatilePtr<'_, MsiCapability, ReadWrite>,
        _vec: InterruptVector,
    ) -> Result<u32> {
        todo!()
    }
181
182    fn allocate_pcie_interrupt(&self, vec: InterruptVector, inum: usize) -> Result<u32> {
183        // Prefer MSI-X
184        let mm = self.find_mmio_bar(0xff).unwrap();
185        for cap in self.pcie_capabilities(&mm).ok_or(TwzError::NOT_SUPPORTED)? {
186            if let PcieCapability::MsiX(mut m) = cap {
187                for msitest in self.pcie_capabilities(&mm).ok_or(TwzError::NOT_SUPPORTED)? {
188                    if let PcieCapability::Msi(mut msi) = msitest {
189                        let msi = msi.as_mut_ptr();
190                        map_field!(msi.msg_ctrl).write(0);
191                    }
192                }
193                return self.allocate_msix_interrupt(m.as_mut_ptr(), vec, inum);
194            }
195        }
196        for cap in self.pcie_capabilities(&mm).ok_or(TwzError::NOT_SUPPORTED)? {
197            if let PcieCapability::Msi(mut m) = cap {
198                return self.allocate_msi_interrupt(&m.as_mut_ptr(), vec);
199            }
200        }
201        Err(TwzError::NOT_SUPPORTED)
202    }
203
    /// Allocate an interrupt vector from the kernel and wire it to this
    /// device's interrupt number `inum`.
    ///
    /// Issues the PCIe-specific `AllocateInterrupt` kaction to obtain a
    /// vector, then programs the device (MSI-X preferred) to deliver it.
    /// Returns the vector together with the programmed interrupt number.
    pub fn allocate_interrupt(&self, inum: usize) -> Result<(InterruptVector, u32)> {
        let vec = self.kaction(
            KactionCmd::Specific(PcieKactionSpecific::AllocateInterrupt.into()),
            0,
            KactionFlags::empty(),
            inum as u64,
        )?;
        // kaction returns a raw u64; convert to an InterruptVector.
        let vec = vec.unwrap_u64().try_into()?;
        let int = self.allocate_pcie_interrupt(vec, inum)?;
        Ok((vec, int))
    }
215}