// code derived from the x86_64 crate
// https://docs.rs/x86_64/latest/src/x86_64/addr.rs.html
// see ATTRIBUTIONS
use crate::defs::*;
use bitflags::bitflags;

#[repr(align(4096))]
#[repr(C)]
#[derive(Clone)]
pub struct Pagetable {
    entries: [PTE; Self::ENTRY_COUNT],
}

#[derive(Clone)]
#[repr(transparent)]
pub struct PTE {
    entry: u64,
}

// Use wrapped VAddr and PAddr types instead of plain u64 as a sanity check:
// a VAddr must be sign extended (canonical), and a PAddr has at most 52 bits.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct VAddr(u64);

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct PAddr(u64);

bitflags! {
    pub struct PTEFlags: u64 {
        const ZERO = 0;
        const PRESENT = 1 << 0;
        const WRITABLE = 1 << 1;
        const USER = 1 << 2;
        // write-through caching
        const WT = 1 << 3;
        // disable caching
        const NC = 1 << 4;
        const ACCESSED = 1 << 5;
        const DIRTY = 1 << 6;
        const HUGE_PAGE = 1 << 7;
        const GLOBAL = 1 << 8;
        // [11:9] are free for user-defined use
        const B9 = 1 << 9;
        const B10 = 1 << 10;
        const B11 = 1 << 11;
        // [51:12] hold the translation (frame) address
        // [62:52] are user defined
        // [63] is NO_EXECUTE; needs to be enabled in EFER
        const NE = 1 << 63;
    }
}
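
// A minimal sketch (not part of the derived code) showing how the bitflags
// type composes and queries bits; only names defined in this module are used.
#[cfg(test)]
mod pteflags_sketch {
    use super::*;

    #[test]
    fn flags_compose() {
        // A typical present, writable mapping occupies bits 0 and 1.
        let rw = PTEFlags::PRESENT | PTEFlags::WRITABLE;
        assert_eq!(rw.bits(), 0b11);
        assert!(rw.contains(PTEFlags::PRESENT));
        assert!(!rw.contains(PTEFlags::USER));
    }
}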

impl Pagetable {
    const ENTRY_COUNT: usize = 512;

    /// Creates an empty page table.
    #[inline]
    pub const fn new() -> Self {
        const EMPTY: PTE = PTE::new();
        Pagetable {
            entries: [EMPTY; Self::ENTRY_COUNT],
        }
    }

    /// Clears all entries.
    #[inline]
    pub fn zero(&mut self) {
        for entry in self.iter_mut() {
            entry.set_unused();
        }
    }

    /// Returns an iterator over the entries of the page table.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = &PTE> {
        (0..Self::ENTRY_COUNT).map(move |i| &self.entries[i])
    }

    /// Returns an iterator that allows modifying the entries of the page table.
    #[inline]
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PTE> {
        // Note that we intentionally don't just return `self.entries.iter()`:
        // Some users may choose to create a reference to a page table at
        // `0xffff_ffff_ffff_f000`. This causes problems because calculating
        // the end pointer of the page table causes an overflow. Therefore
        // creating page tables at that address is unsound and must be avoided.
        // Unfortunately creating such page tables is quite common when
        // recursive page tables are used, so we try to avoid calculating the
        // end pointer if possible. `core::slice::Iter` calculates the end
        // pointer to determine when it should stop yielding elements, so we
        // implement our own iterator that doesn't. This doesn't make creating
        // page tables at that address sound, but it avoids some easy-to-trigger
        // miscompilations.
        let ptr = self.entries.as_mut_ptr();
        (0..Self::ENTRY_COUNT).map(move |i| unsafe { &mut *ptr.add(i) })
    }

    /// Checks if the page table is empty (all entries are zero).
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.iter().all(|entry| entry.is_unused())
    }
}
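
// Usage sketch (illustrative, not from the original commit): a freshly
// created table is empty, and zero() keeps it that way.
#[cfg(test)]
mod pagetable_sketch {
    use super::*;

    #[test]
    fn new_table_is_empty() {
        let mut pt = Pagetable::new();
        assert!(pt.is_empty());
        pt.zero();
        assert!(pt.is_empty());
    }
}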

impl VAddr {
    /// Creates a new canonical virtual address.
    /// Panics if bits 48..64 are not a sign extension of bit 47.
    #[inline]
    pub const fn new(addr: u64) -> VAddr {
        Self::try_new(addr).expect("VA must be sign extended in 16 MSBs")
    }

    /// Tries to create a new canonical virtual address.
    /// Fails if bits 48..64 are not a sign extension of bit 47.
    #[inline]
    pub const fn try_new(addr: u64) -> Option<VAddr> {
        let v = Self::new_truncate(addr);
        if v.0 == addr {
            Some(v)
        } else {
            None
        }
    }

    /// Creates a new virtual address, canonicalizing by sign extension.
    #[inline]
    pub const fn new_truncate(addr: u64) -> VAddr {
        // sign extend bit 47 into the upper 16 bits
        VAddr(((addr << 16) as i64 >> 16) as u64)
    }

    #[inline]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Converts the address to a raw pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_ptr<T>(self) -> *const T {
        self.as_u64() as *const T
    }

    /// Converts the address to a mutable raw pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_mut_ptr<T>(self) -> *mut T {
        self.as_ptr::<T>() as *mut T
    }

    /// Returns the 9-bit level 1 page table index.
    #[inline]
    pub const fn p1_index(self) -> usize {
        ((self.0 >> 12) & 0x1ff) as usize
    }

    /// Returns the 9-bit level 2 page table index.
    #[inline]
    pub const fn p2_index(self) -> usize {
        ((self.0 >> 12 >> 9) & 0x1ff) as usize
    }

    /// Returns the 9-bit level 3 page table index.
    #[inline]
    pub const fn p3_index(self) -> usize {
        ((self.0 >> 12 >> 9 >> 9) & 0x1ff) as usize
    }

    /// Returns the 9-bit level 4 page table index.
    #[inline]
    pub const fn p4_index(self) -> usize {
        ((self.0 >> 12 >> 9 >> 9 >> 9) & 0x1ff) as usize
    }
}
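
// Worked sketch (illustrative, not from the original commit): canonical
// checks and index extraction for sample addresses chosen here.
#[cfg(test)]
mod vaddr_sketch {
    use super::*;

    #[test]
    fn canonical_and_indices() {
        // Canonical: bits 48..64 mirror bit 47.
        assert!(VAddr::try_new(0xffff_8000_0000_0000).is_some());
        // Non-canonical: bit 47 is set but the upper 16 bits are zero.
        assert!(VAddr::try_new(0x0000_8000_0000_0000).is_none());
        // new_truncate repairs it by sign extension.
        assert_eq!(
            VAddr::new_truncate(0x0000_8000_0000_0000).as_u64(),
            0xffff_8000_0000_0000
        );
        // Each level consumes 9 bits above the 12-bit page offset.
        let va = VAddr::new(0x0000_7fff_ffff_f000);
        assert_eq!(va.p1_index(), 0x1ff);
        assert_eq!(va.p2_index(), 0x1ff);
        assert_eq!(va.p3_index(), 0x1ff);
        assert_eq!(va.p4_index(), 0x0ff);
    }
}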

impl PAddr {
    /// Creates a new physical address.
    /// Panics if any bits in the range 52..64 are set.
    #[inline]
    pub const fn new(addr: u64) -> Self {
        Self::try_new(addr).expect("PA shall not have more than 52 bits")
    }

    /// Creates a new physical address, throwing bits 52..64 away.
    #[inline]
    pub const fn new_truncate(addr: u64) -> PAddr {
        PAddr(addr % (1 << 52))
    }

    /// Tries to create a new physical address.
    /// Fails if any bits in the range 52..64 are set.
    #[inline]
    pub const fn try_new(addr: u64) -> Option<Self> {
        let p = Self::new_truncate(addr);
        if p.0 == addr {
            Some(p)
        } else {
            None
        }
    }
}
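
// Boundary sketch (illustrative, not from the original commit): the 52-bit
// limit and the truncating constructor.
#[cfg(test)]
mod paddr_sketch {
    use super::*;

    #[test]
    fn fifty_two_bit_limit() {
        assert!(PAddr::try_new((1 << 52) - 1).is_some());
        assert!(PAddr::try_new(1 << 52).is_none());
        // new_truncate simply drops bits 52..64.
        assert_eq!(PAddr::new_truncate((1 << 52) | 0x1000), PAddr::new(0x1000));
    }
}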

impl PTE {
    /// Creates an unused (zero) entry.
    #[inline]
    pub const fn new() -> Self {
        PTE { entry: 0 }
    }

    /// Returns whether the entry is unused (zero).
    #[inline]
    pub const fn is_unused(&self) -> bool {
        self.entry == 0
    }

    /// Marks the entry as unused by zeroing it.
    #[inline]
    pub fn set_unused(&mut self) {
        self.entry = 0;
    }

    /// Returns the flag bits of the entry.
    #[inline]
    pub const fn flags(&self) -> PTEFlags {
        // from_bits_truncate ignores undefined bits.
        PTEFlags::from_bits_truncate(self.entry)
    }

    /// Returns the physical frame address stored in bits [51:12].
    #[inline]
    pub const fn addr(&self) -> PAddr {
        PAddr::new(self.entry & 0x000f_ffff_ffff_f000)
    }
}
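
// Decoding sketch (illustrative, not from the original commit): the raw
// entry value below is hypothetical, frame 0x2000 with PRESENT | WRITABLE.
#[cfg(test)]
mod pte_sketch {
    use super::*;

    #[test]
    fn decode_entry() {
        // Private field access works here because tests are a child module.
        let pte = PTE { entry: 0x2000 | 0b11 };
        assert!(pte.flags().contains(PTEFlags::PRESENT | PTEFlags::WRITABLE));
        assert_eq!(pte.addr(), PAddr::new(0x2000));
        assert!(!pte.is_unused());
    }
}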