
Commit 7c82b6c

Add page attribute table support
1 parent 59ac07c commit 7c82b6c

4 files changed: +153 -34 lines

src/registers/model_specific.rs

Lines changed: 106 additions & 0 deletions

@@ -67,6 +67,10 @@ pub struct UCet;
 #[derive(Debug)]
 pub struct SCet;
 
+/// IA32_PAT: Page Attribute Table.
+#[derive(Debug)]
+pub struct Pat;
+
 impl Efer {
     /// The underlying model specific register.
     pub const MSR: Msr = Msr(0xC000_0080);
@@ -112,6 +116,22 @@ impl SCet {
     pub const MSR: Msr = Msr(0x6A2);
 }
 
+impl Pat {
+    /// The underlying model specific register.
+    pub const MSR: Msr = Msr(0x277);
+    /// The default PAT configuration following a power up or reset of the processor.
+    pub const DEFAULT: [PatFlags; 8] = [
+        PatFlags::WRITE_BACK,
+        PatFlags::WRITE_THROUGH,
+        PatFlags::UNCACHED,
+        PatFlags::UNCACHEABLE,
+        PatFlags::WRITE_BACK,
+        PatFlags::WRITE_THROUGH,
+        PatFlags::UNCACHED,
+        PatFlags::UNCACHEABLE,
+    ];
+}
+
 bitflags! {
     /// Flags of the Extended Feature Enable Register.
     #[repr(transparent)]
@@ -161,6 +181,52 @@ bitflags! {
     }
 }
 
+#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
+/// Flags for the [PAT](Pat).
+pub struct PatFlags(u8);
+impl PatFlags {
+    /// Disables caching.
+    pub const UNCACHEABLE: Self = Self(0x00);
+    /// Uses a write combining cache policy.
+    pub const WRITE_COMBINING: Self = Self(0x01);
+    /// Uses a write through cache policy.
+    pub const WRITE_THROUGH: Self = Self(0x04);
+    /// Uses a write protected cache policy.
+    pub const WRITE_PROTECTED: Self = Self(0x05);
+    /// Uses a write back cache policy.
+    pub const WRITE_BACK: Self = Self(0x06);
+    /// Same as uncacheable, but can be overridden by MTRRs.
+    pub const UNCACHED: Self = Self(0x07);
+
+    /// Converts from bits, returning `None` if the value is invalid.
+    pub const fn from_bits(bits: u8) -> Option<Self> {
+        match Self(bits) {
+            Self::UNCACHEABLE
+            | Self::WRITE_COMBINING
+            | Self::WRITE_THROUGH
+            | Self::WRITE_PROTECTED
+            | Self::WRITE_BACK
+            | Self::UNCACHED => Some(Self(bits)),
+            _ => None,
+        }
+    }
+
+    /// Converts from bits without checking if the value is valid.
+    ///
+    /// # Safety
+    ///
+    /// `bits` must correspond to a valid memory type, otherwise a general protection exception will
+    /// occur if it is written to the PAT.
+    pub const unsafe fn from_bits_unchecked(bits: u8) -> Self {
+        Self(bits)
+    }
+
+    /// Gets the underlying bits.
+    pub const fn bits(self) -> u8 {
+        self.0
+    }
+}
+
 #[cfg(all(feature = "instructions", target_arch = "x86_64"))]
 mod x86_64 {
     use super::*;
@@ -636,4 +702,44 @@ mod x86_64 {
             Self::write(flags, legacy_bitmap);
         }
     }
+
+    impl Pat {
+        /// Reads IA32_PAT.
+        ///
+        /// # Safety
+        ///
+        /// The PAT must be supported on the CPU, otherwise a general protection exception will
+        /// occur. Support can be detected using the `cpuid` instruction.
+        #[inline]
+        pub unsafe fn read() -> [PatFlags; 8] {
+            let bits = unsafe { Self::MSR.read() };
+            let mut flags = [PatFlags::UNCACHEABLE; 8];
+            for (i, flag) in flags.iter_mut().enumerate() {
+                *flag = PatFlags((bits >> (8 * i)) as u8);
+            }
+            flags
+        }
+
+        /// Writes IA32_PAT.
+        ///
+        /// # Safety
+        ///
+        /// All affected pages must be flushed from the TLB. Processor caches may also need to be
+        /// flushed. Additionally, all pages that map to a given frame must have the same memory
+        /// type.
+        ///
+        /// The PAT must be supported on the CPU, otherwise a general protection exception will
+        /// occur. Support can be detected using the `cpuid` instruction.
+        #[inline]
+        pub unsafe fn write(flags: [PatFlags; 8]) {
+            let mut bits = 0u64;
+            for (i, flag) in flags.iter().enumerate() {
+                bits |= (flag.bits() as u64) << (8 * i);
+            }
+            let mut msr = Self::MSR;
+            unsafe {
+                msr.write(bits);
+            }
+        }
+    }
 }
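As a usage illustration (not part of this commit): with the crate's `instructions` feature enabled and PAT support already confirmed via `cpuid`, a kernel could repurpose one of the eight PAT entries for write-combining. This is a minimal sketch; the helper name and the choice of entry 1 are hypothetical, and it assumes the new types are reachable under `x86_64::registers::model_specific`.

    use x86_64::registers::model_specific::{Pat, PatFlags};

    /// Repurposes PAT entry 1 for write-combining (e.g. framebuffer) mappings.
    ///
    /// # Safety
    ///
    /// PAT must be supported (check `cpuid`), the code must run in ring 0, and the caller
    /// must flush the TLB (and possibly caches) for any pages that already use entry 1.
    unsafe fn use_write_combining_in_entry_1() {
        // Start from the current configuration rather than Pat::DEFAULT to preserve
        // any earlier changes.
        let mut flags = unsafe { Pat::read() };
        flags[1] = PatFlags::WRITE_COMBINING;
        unsafe { Pat::write(flags) };
    }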

src/structures/paging/mapper/mapped_page_table.rs

Lines changed: 6 additions & 2 deletions

@@ -421,7 +421,6 @@ impl<P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'_, P> {
 
         let frame = p1_entry.frame().map_err(|err| match err {
             FrameError::FrameNotPresent => UnmapError::PageNotMapped,
-            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
         })?;
 
         p1_entry.set_unused();
@@ -711,6 +710,9 @@ impl<P: PageTableFrameMapping> PageTableWalker<P> {
         &self,
         entry: &'b PageTableEntry,
     ) -> Result<&'b PageTable, PageTableWalkError> {
+        if entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+            return Err(PageTableWalkError::MappedToHugePage);
+        }
         let page_table_ptr = self
             .page_table_frame_mapping
             .frame_to_pointer(entry.frame()?);
@@ -729,6 +731,9 @@ impl<P: PageTableFrameMapping> PageTableWalker<P> {
         &self,
         entry: &'b mut PageTableEntry,
     ) -> Result<&'b mut PageTable, PageTableWalkError> {
+        if entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+            return Err(PageTableWalkError::MappedToHugePage);
+        }
         let page_table_ptr = self
             .page_table_frame_mapping
             .frame_to_pointer(entry.frame()?);
@@ -832,7 +837,6 @@ impl From<FrameError> for PageTableWalkError {
     #[inline]
     fn from(err: FrameError) -> Self {
         match err {
-            FrameError::HugeFrame => PageTableWalkError::MappedToHugePage,
            FrameError::FrameNotPresent => PageTableWalkError::NotMapped,
         }
     }
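Callers of the `Mapper` API are unaffected by the removal of `FrameError::HugeFrame`: a huge-page parent still surfaces as `UnmapError::ParentEntryHugePage`, it is just detected from the entry flags inside `PageTableWalker` now. A rough caller-side sketch (the `try_unmap` helper is hypothetical, not from this commit):

    use x86_64::structures::paging::{mapper::UnmapError, Mapper, Page, Size4KiB};

    fn try_unmap<M: Mapper<Size4KiB>>(mapper: &mut M, page: Page<Size4KiB>) {
        match mapper.unmap(page) {
            // Unmapped successfully; flush the stale TLB entry.
            Ok((_frame, flush)) => flush.flush(),
            // Nothing was mapped at this address.
            Err(UnmapError::PageNotMapped) => {}
            // The address is covered by a 2MiB or 1GiB mapping higher up.
            Err(UnmapError::ParentEntryHugePage) => {}
            Err(err) => panic!("unexpected unmap error: {:?}", err),
        }
    }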

src/structures/paging/mapper/recursive_page_table.rs

Lines changed: 27 additions & 16 deletions

@@ -322,9 +322,11 @@ impl Mapper<Size1GiB> for RecursivePageTable<'_> {
         let p4 = &mut self.p4;
         let p4_entry = &p4[page.p4_index()];
 
-        p4_entry.frame().map_err(|err| match err {
+        if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+            return Err(UnmapError::ParentEntryHugePage);
+        }
+        p4_entry.frame::<Size4KiB>().map_err(|err| match err {
             FrameError::FrameNotPresent => UnmapError::PageNotMapped,
-            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
         })?;
 
         let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
@@ -441,16 +443,20 @@ impl Mapper<Size2MiB> for RecursivePageTable<'_> {
     ) -> Result<(PhysFrame<Size2MiB>, MapperFlush<Size2MiB>), UnmapError> {
         let p4 = &mut self.p4;
         let p4_entry = &p4[page.p4_index()];
-        p4_entry.frame().map_err(|err| match err {
+        if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+            return Err(UnmapError::ParentEntryHugePage);
+        }
+        p4_entry.frame::<Size4KiB>().map_err(|err| match err {
             FrameError::FrameNotPresent => UnmapError::PageNotMapped,
-            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
         })?;
 
         let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
         let p3_entry = &p3[page.p3_index()];
-        p3_entry.frame().map_err(|err| match err {
+        if p3_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+            return Err(UnmapError::ParentEntryHugePage);
+        }
+        p3_entry.frame::<Size4KiB>().map_err(|err| match err {
             FrameError::FrameNotPresent => UnmapError::PageNotMapped,
-            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
         })?;
 
         let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
@@ -596,31 +602,36 @@ impl Mapper<Size4KiB> for RecursivePageTable<'_> {
     ) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
         let p4 = &mut self.p4;
         let p4_entry = &p4[page.p4_index()];
-        p4_entry.frame().map_err(|err| match err {
+        if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+            return Err(UnmapError::ParentEntryHugePage);
+        }
+        p4_entry.frame::<Size4KiB>().map_err(|err| match err {
             FrameError::FrameNotPresent => UnmapError::PageNotMapped,
-            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
         })?;
 
         let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
         let p3_entry = &p3[page.p3_index()];
-        p3_entry.frame().map_err(|err| match err {
+        if p3_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+            return Err(UnmapError::ParentEntryHugePage);
+        }
+        p3_entry.frame::<Size4KiB>().map_err(|err| match err {
             FrameError::FrameNotPresent => UnmapError::PageNotMapped,
-            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
         })?;
 
         let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
         let p2_entry = &p2[page.p2_index()];
-        p2_entry.frame().map_err(|err| match err {
+        if p2_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+            return Err(UnmapError::ParentEntryHugePage);
+        }
+        p2_entry.frame::<Size4KiB>().map_err(|err| match err {
             FrameError::FrameNotPresent => UnmapError::PageNotMapped,
-            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
         })?;
 
         let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
         let p1_entry = &mut p1[page.p1_index()];
 
         let frame = p1_entry.frame().map_err(|err| match err {
             FrameError::FrameNotPresent => UnmapError::PageNotMapped,
-            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
         })?;
 
         p1_entry.set_unused();
@@ -818,9 +829,6 @@ impl Translate for RecursivePageTable<'_> {
         if p1_entry.is_unused() {
             return TranslateResult::NotMapped;
         }
-        if p1_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
-            panic!("level 1 entry has huge page bit set")
-        }
 
         let frame = match PhysFrame::from_start_address(p1_entry.addr()) {
             Ok(frame) => frame,
@@ -890,6 +898,9 @@ impl CleanUp for RecursivePageTable<'_> {
                     !(level == PageTableLevel::Four && *i == recursive_index.into())
                 })
             {
+                if entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+                    continue;
+                }
                 if let Ok(frame) = entry.frame() {
                     let start = VirtAddr::forward_checked_impl(
                         table_addr,
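Each level now follows the same pattern: check the entry's `HUGE_PAGE` flag first, then request the next-level table frame through the now-generic `frame::<Size4KiB>()`. Expressed outside the crate, it looks roughly like the sketch below (the `next_table_frame` helper and its error strings are hypothetical, not part of this commit):

    use x86_64::structures::paging::{
        page_table::{FrameError, PageTableEntry, PageTableFlags},
        PhysFrame, Size4KiB,
    };

    fn next_table_frame(entry: &PageTableEntry) -> Result<PhysFrame<Size4KiB>, &'static str> {
        // A set HUGE_PAGE bit means this entry maps a huge frame, not another page table.
        if entry.flags().contains(PageTableFlags::HUGE_PAGE) {
            return Err("parent entry maps a huge page");
        }
        // After this commit, frame() only reports a missing PRESENT bit.
        entry.frame::<Size4KiB>().map_err(|err| match err {
            FrameError::FrameNotPresent => "entry is not present",
        })
    }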

src/structures/paging/page_table.rs

Lines changed: 14 additions & 16 deletions

@@ -15,9 +15,6 @@ use bitflags::bitflags;
 pub enum FrameError {
     /// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
     FrameNotPresent,
-    /// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
-    /// as return type, so a huge frame can't be returned.
-    HugeFrame,
 }
 
 /// A 64-bit page table entry.
@@ -63,16 +60,12 @@ impl PageTableEntry {
     /// Returns the following errors:
     ///
     /// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
-    /// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
-    ///   `addr` function must be used)
     #[inline]
-    pub fn frame(&self) -> Result<PhysFrame, FrameError> {
-        if !self.flags().contains(PageTableFlags::PRESENT) {
-            Err(FrameError::FrameNotPresent)
-        } else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
-            Err(FrameError::HugeFrame)
-        } else {
+    pub fn frame<S: PageSize>(&self) -> Result<PhysFrame<S>, FrameError> {
+        if self.flags().contains(PageTableFlags::PRESENT) {
             Ok(PhysFrame::containing_address(self.addr()))
+        } else {
+            Err(FrameError::FrameNotPresent)
         }
     }
 
@@ -86,7 +79,6 @@ impl PageTableEntry {
     /// Map the entry to the specified physical frame with the specified flags.
     #[inline]
     pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
-        assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
         self.set_addr(frame.start_address(), flags)
     }
 
@@ -128,17 +120,21 @@ bitflags! {
         /// Controls whether accesses from userspace (i.e. ring 3) are permitted.
        const USER_ACCESSIBLE = 1 << 2;
         /// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
-        /// policy is used.
+        /// policy is used. This is referred to as the page-level write-through (PWT) bit.
         const WRITE_THROUGH = 1 << 3;
-        /// Disables caching for the pointed entry is cacheable.
+        /// Disables caching for the pointed entry if it is cacheable. This is referred to as the
+        /// page-level cache disable (PCD) bit.
         const NO_CACHE = 1 << 4;
         /// Set by the CPU when the mapped frame or page table is accessed.
         const ACCESSED = 1 << 5;
         /// Set by the CPU on a write to the mapped frame.
         const DIRTY = 1 << 6;
-        /// Specifies that the entry maps a huge frame instead of a page table. Only allowed in
-        /// P2 or P3 tables.
+        /// Specifies that the entry maps a huge frame instead of a page table. This is the same bit
+        /// as `PAT_4KIB_PAGE`.
         const HUGE_PAGE = 1 << 7;
+        /// This is the PAT bit for page table entries that point to 4KiB pages. This is the same
+        /// bit as `HUGE_PAGE`.
+        const PAT_4KIB_PAGE = 1 << 7;
         /// Indicates that the mapping is present in all address spaces, so it isn't flushed from
         /// the TLB on an address space switch.
         const GLOBAL = 1 << 8;
@@ -148,6 +144,8 @@ bitflags! {
         const BIT_10 = 1 << 10;
         /// Available to the OS, can be used to store additional data, e.g. custom flags.
         const BIT_11 = 1 << 11;
+        /// This is the PAT bit for page table entries that point to huge pages.
+        const PAT_HUGE_PAGE = 1 << 12;
         /// Available to the OS, can be used to store additional data, e.g. custom flags.
         const BIT_52 = 1 << 52;
         /// Available to the OS, can be used to store additional data, e.g. custom flags.
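For reference, `PAT_4KIB_PAGE` (or `PAT_HUGE_PAGE` for huge pages) combines with the existing `WRITE_THROUGH` (PWT) and `NO_CACHE` (PCD) bits to select one of the eight PAT entries: the index uses PAT as bit 2, PCD as bit 1, and PWT as bit 0. A small illustrative sketch for 4KiB mappings (the helper name is hypothetical, not part of this commit):

    use x86_64::structures::paging::PageTableFlags;

    /// Computes which PAT entry a 4KiB page table entry with these flags selects.
    fn pat_index_for_4kib(flags: PageTableFlags) -> usize {
        let mut index = 0;
        if flags.contains(PageTableFlags::WRITE_THROUGH) {
            index |= 1 << 0; // PWT
        }
        if flags.contains(PageTableFlags::NO_CACHE) {
            index |= 1 << 1; // PCD
        }
        if flags.contains(PageTableFlags::PAT_4KIB_PAGE) {
            index |= 1 << 2; // PAT
        }
        index
    }

    // With Pat::DEFAULT, pat_index_for_4kib(PageTableFlags::WRITE_THROUGH) == 1,
    // which selects PatFlags::WRITE_THROUGH.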
