From 6d32033a41da0fcedce870720f30c2b24e0283f9 Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Mon, 10 Mar 2025 22:59:46 +0530 Subject: [PATCH 01/10] feat: opentmk init feat: opentmk init feat: opentmk init feat: opentmk init feat: opentmk init feat: opentmk init feat: opentmk init feat: init 1 feat: init 2 feat: init 1 feat: opentmk feat: opentmk init 3 feat: opentmk init 4 feat: opentmk init 4 --- Cargo.lock | 75 +++ Cargo.toml | 2 + opentmk/Cargo.toml | 32 + opentmk/README.md | 3 + opentmk/build_deploy.sh | 3 + opentmk/src/arch/aarch64/hypercall.rs | 27 + opentmk/src/arch/aarch64/mod.rs | 2 + opentmk/src/arch/aarch64/serial.rs | 240 +++++++ opentmk/src/arch/mod.rs | 16 + opentmk/src/arch/x86_64/hypercall.rs | 53 ++ opentmk/src/arch/x86_64/interrupt.rs | 50 ++ .../arch/x86_64/interrupt_handler_register.rs | 606 ++++++++++++++++++ opentmk/src/arch/x86_64/mod.rs | 4 + opentmk/src/arch/x86_64/serial.rs | 125 ++++ opentmk/src/main.rs | 27 + opentmk/src/slog.rs | 242 +++++++ opentmk/src/sync.rs | 490 ++++++++++++++ opentmk/src/uefi/alloc.rs | 86 +++ opentmk/src/uefi/context.rs | 61 ++ opentmk/src/uefi/hypercall.rs | 604 +++++++++++++++++ opentmk/src/uefi/hypvctx.rs | 403 ++++++++++++ opentmk/src/uefi/init.rs | 57 ++ opentmk/src/uefi/mod.rs | 36 ++ opentmk/src/uefi/rt.rs | 23 + opentmk/src/uefi/tests/hv_misc.rs | 136 ++++ opentmk/src/uefi/tests/hv_processor.rs | 75 +++ opentmk/src/uefi/tests/mod.rs | 14 + .../src/tasks/guest_test/uefi/gpt_efi_disk.rs | 2 +- 28 files changed, 3493 insertions(+), 1 deletion(-) create mode 100644 opentmk/Cargo.toml create mode 100644 opentmk/README.md create mode 100755 opentmk/build_deploy.sh create mode 100644 opentmk/src/arch/aarch64/hypercall.rs create mode 100644 opentmk/src/arch/aarch64/mod.rs create mode 100644 opentmk/src/arch/aarch64/serial.rs create mode 100644 opentmk/src/arch/mod.rs create mode 100644 opentmk/src/arch/x86_64/hypercall.rs create mode 100644 opentmk/src/arch/x86_64/interrupt.rs create mode 100644 opentmk/src/arch/x86_64/interrupt_handler_register.rs create mode 100644 opentmk/src/arch/x86_64/mod.rs create mode 100644 opentmk/src/arch/x86_64/serial.rs create mode 100644 opentmk/src/main.rs create mode 100644 opentmk/src/slog.rs create mode 100644 opentmk/src/sync.rs create mode 100644 opentmk/src/uefi/alloc.rs create mode 100644 opentmk/src/uefi/context.rs create mode 100644 opentmk/src/uefi/hypercall.rs create mode 100644 opentmk/src/uefi/hypvctx.rs create mode 100644 opentmk/src/uefi/init.rs create mode 100644 opentmk/src/uefi/mod.rs create mode 100644 opentmk/src/uefi/rt.rs create mode 100644 opentmk/src/uefi/tests/hv_misc.rs create mode 100644 opentmk/src/uefi/tests/hv_processor.rs create mode 100644 opentmk/src/uefi/tests/mod.rs diff --git a/Cargo.lock b/Cargo.lock index f86c244203..302b3ed249 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3547,6 +3547,9 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] [[package]] name = "libc" @@ -3609,6 +3612,15 @@ dependencies = [ "escape8259", ] +[[package]] +name = "linked_list_allocator" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" +dependencies = [ + "spinning_top", +] + [[package]] name = "linkme" version = "0.3.31" @@ -4769,6 +4781,27 @@ dependencies = [ "thiserror 2.0.0", ] +[[package]] +name = 
"opentmk" +version = "0.0.0" +dependencies = [ + "arrayvec", + "bitfield-struct", + "cfg-if", + "hvdef", + "lazy_static", + "linked_list_allocator", + "memory_range", + "minimal_rt", + "minimal_rt_build", + "serde", + "serde_json", + "spin 0.10.0", + "uefi", + "x86_64", + "zerocopy 0.8.14", +] + [[package]] name = "openvmm" version = "0.0.0" @@ -6360,6 +6393,30 @@ dependencies = [ "zerocopy 0.8.14", ] +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spinning_top" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0" +dependencies = [ + "lock_api", +] + [[package]] name = "stackfuture" version = "0.3.0" @@ -8736,6 +8793,12 @@ dependencies = [ "vmsocket", ] +[[package]] +name = "volatile" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "442887c63f2c839b346c192d047a7c87e73d0689c9157b00b53dcc27dd5ea793" + [[package]] name = "vpci" version = "0.0.0" @@ -9313,6 +9376,18 @@ dependencies = [ "tap", ] +[[package]] +name = "x86_64" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f042214de98141e9c8706e8192b73f56494087cc55ebec28ce10f26c5c364ae" +dependencies = [ + "bit_field", + "bitflags 2.6.0", + "rustversion", + "volatile", +] + [[package]] name = "x86defs" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 1dd0ca13fb..fd2106e652 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ members = [ "vm/loader/igvmfilegen", "vm/vmgs/vmgs_lib", "vm/vmgs/vmgstool", + "opentmk" ] exclude = [ "xsync", @@ -525,6 +526,7 @@ xshell-macros = "0.2" # We add the derive feature here since the vast majority of our crates use it. #zerocopy = { version = "0.7.32", features = ["derive"]} zerocopy = { version = "0.8.14", features = ["derive"]} +linked_list_allocator = "0.10.5" [workspace.metadata.xtask.unused-deps] # Pulled in through "tracing", but we need to pin the version diff --git a/opentmk/Cargo.toml b/opentmk/Cargo.toml new file mode 100644 index 0000000000..752022b057 --- /dev/null +++ b/opentmk/Cargo.toml @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+
+[package]
+name = "opentmk"
+edition.workspace = true
+rust-version.workspace = true
+
+[dependencies]
+uefi = { workspace = true, features = ["alloc"] }
+minimal_rt.workspace = true
+linked_list_allocator = { workspace = true }
+hvdef = { workspace = true }
+zerocopy = { workspace = true }
+memory_range = { workspace = true }
+arrayvec = { workspace = true }
+cfg-if.workspace = true
+bitfield-struct.workspace = true
+x86_64 = "0.15.2"
+lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
+serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
+spin = "0.10.0"
+serde = { version = "1.0", default-features = false }
+[lints]
+workspace = true
+
+[build-dependencies]
+minimal_rt_build.workspace = true
+
+[profile.release]
+debug = false
+strip = "debuginfo"
diff --git a/opentmk/README.md b/opentmk/README.md
new file mode 100644
index 0000000000..999308fc0b
--- /dev/null
+++ b/opentmk/README.md
@@ -0,0 +1,3 @@
+# `opentmk`
+
+See the guide for more info on how to build/run the code in this crate.
diff --git a/opentmk/build_deploy.sh b/opentmk/build_deploy.sh
new file mode 100755
index 0000000000..0c68e1643e
--- /dev/null
+++ b/opentmk/build_deploy.sh
@@ -0,0 +1,3 @@
+RUST_BACKTRACE=1 cargo build -p opentmk --target x86_64-unknown-uefi
+cargo xtask guest-test uefi --bootx64 ./target/x86_64-unknown-uefi/debug/opentmk.efi
+qemu-img convert -f raw -O vhdx ./target/x86_64-unknown-uefi/debug/opentmk.img ~/projects/opentmk.vhdx
\ No newline at end of file
diff --git a/opentmk/src/arch/aarch64/hypercall.rs b/opentmk/src/arch/aarch64/hypercall.rs
new file mode 100644
index 0000000000..35011e089a
--- /dev/null
+++ b/opentmk/src/arch/aarch64/hypercall.rs
@@ -0,0 +1,27 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+/// Writes a synthetic register to tell the hypervisor the OS ID for the boot shim.
+fn report_os_id(guest_os_id: u64) {
+    // On ARM64, to be able to make hypercalls, one needs first to set the Guest OS ID
+    // synthetic register using a hypercall. `Hvcall::set_register` can't be used for this,
+    // as it would recurse infinitely: that function first initializes hypercalls by
+    // setting this very register.
+    //
+    // Only one very specific HvSetVpRegisters hypercall is allowed to set the Guest OS ID
+    // (this is TLFS section 17.4.4.1.1 and 5.3), and that must be the fast hypercall.
+    let _ = minimal_rt::arch::hypercall::set_register_fast(
+        hvdef::HvArm64RegisterName::GuestOsId.into(),
+        guest_os_id.into(),
+    );
+}
+
+pub(crate) fn initialize(guest_os_id: u64) {
+    // We are assuming we are running under a Microsoft hypervisor.
+    report_os_id(guest_os_id);
+}
+
+/// Call before jumping to kernel.
+pub(crate) fn uninitialize() {
+    report_os_id(0);
+}
\ No newline at end of file
diff --git a/opentmk/src/arch/aarch64/mod.rs b/opentmk/src/arch/aarch64/mod.rs
new file mode 100644
index 0000000000..594be8b42a
--- /dev/null
+++ b/opentmk/src/arch/aarch64/mod.rs
@@ -0,0 +1,2 @@
+pub mod hypercall;
+pub mod serial;
\ No newline at end of file
diff --git a/opentmk/src/arch/aarch64/serial.rs b/opentmk/src/arch/aarch64/serial.rs
new file mode 100644
index 0000000000..f68e6cb200
--- /dev/null
+++ b/opentmk/src/arch/aarch64/serial.rs
@@ -0,0 +1,240 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+//! aarch64 MMIO-based serial port, UART PL011.
+//!
+//! Used for debug output. Follows
+//! 
[PrimeCell UART (PL011) Technical Reference Manual](https://developer.arm.com/documentation/ddi0183/g/) +//! +//! PL011 Registers: +//! +//! Offset Name Type Reset Bits Description +//! ---------------------------------------------------------------------- +//! 0x000 UARTDR RW 0x--- 12/8 Data Register +//! 0x004 UARTRSR/UARTECR RW 0x0 4/0 Receive Status Register/Error Clear Register +//! 0x018 UARTFR RO 0b-10010--- 9 Flag Register +//! 0x020 UARTILPR RW 0x00 8 IrDA Low-Power Counter Register +//! 0x024 UARTIBRD RW 0x0000 16 Integer Baud Rate Register +//! 0x028 UARTFBRD RW 0x00 6 Fractional Baud Rate Register +//! 0x02C UARTLCR_H RW 0x00 8 Line Control Register +//! 0x030 UARTCR RW 0x0300 16 Control Register +//! 0x034 UARTIFLS RW 0x12 6 Interrupt FIFO Level Select Register +//! 0x038 UARTIMSC RW 0x000 11 Interrupt Mask Set/Clear Register +//! 0x03C UARTRIS RO 0x00- 11 Raw Interrupt Status Register +//! 0x040 UARTMIS RO 0x00- 11 Masked Interrupt Status Register +//! 0x044 UARTICR WO - 11 Interrupt Clear Register +//! 0x048 UARTDMACR RW 0x00 3 DMA Control Register +//! 0xFE0 UARTPeriphID0 RO 0x11 8 UARTPeriphID0 Register +//! 0xFE4 UARTPeriphID1 RO 0x10 8 UARTPeriphID1 Register +//! 0xFE8 UARTPeriphID2 RO 0x_4a 8 UARTPeriphID2 Register +//! 0xFEC UARTPeriphID3 RO 0x00 8 UARTPeriphID3 Register +//! 0xFF0 UARTPCellID0 RO 0x0D 8 UARTPCellID0 Register +//! 0xFF4 UARTPCellID1 RO 0xF0 8 UARTPCellID1 Register +//! 0xFF8 UARTPCellID2 RO 0x05 8 UARTPCellID2 Register +//! 0xFFC UARTPCellID3 RO 0xB1 8 UARTPCellID3 Register + +#![allow(dead_code)] + +use core::hint::spin_loop; +use core::sync::atomic::AtomicBool; +use core::sync::atomic::Ordering; + +#[derive(Debug, Clone, Copy)] +#[repr(u16)] +enum Pl011Register { + /// Data Register + Dr = 0x000, + /// Receive Status Register/Error Clear Register + RsrOrEcr = 0x004, + /// Flag register + Fr = 0x018, + /// Integer Baud Rate Register + Ibrd = 0x024, + /// Fractional Baud Rate Register + Fbrd = 0x028, + /// Line Control Register + LcrHigh = 0x02c, + /// Control Register + Cr = 0x030, + /// Masked Interrupt Status Register + Imsc = 0x038, + /// Interrupt Clear Register + Icr = 0x044, + /// DMA Control Register + DmaCr = 0x048, + /// UARTPeriphID0 Register + PeriphID0 = 0xFE0, + /// UARTPeriphID1 Register + PeriphID1 = 0xFE4, + /// UARTPeriphID2 Register + PeriphID2 = 0xFE8, + /// UARTPeriphID3 Register + PeriphID3 = 0xFEC, + /// UARTPCellID0 Register + PCellID0 = 0xFF0, + /// UARTPCellID1 Register + PCellID1 = 0xFF4, + /// UARTPCellID2 Register + PCellID2 = 0xFF8, + /// UARTPCellID3 Register + PCellID3 = 0xFFC, +} + +const CR_RX_ENABLE: u32 = 0x200; +const CR_TX_ENABLE: u32 = 0x100; +const CR_UART_ENABLE: u32 = 1; +const LCR_H_FIFO_EN: u32 = 0x10; +const LCR_H_8BITS: u32 = 0x60; + +const _FR_TX_EMPTY: u32 = 0x080; +const _FR_RX_FULL: u32 = 0x040; +const FR_TX_FULL: u32 = 0x020; +const _FR_RX_EMPTY: u32 = 0x010; +const FR_BUSY: u32 = 0x008; + +/// The Hyper-V PL011 host emulated PL011's are found at these +/// base addresses. Should come from ACPI or DT of course yet +/// due to having been hardcoded in some products makes that +/// virtually constants. +const PL011_HYPER_V_BASE_1: u64 = 0xeffec000; +const _PL011_HYPER_V_BASE_2: u64 = 0xeffeb000; +const PL011_BASE: u64 = PL011_HYPER_V_BASE_1; + +fn read_register(reg: Pl011Register) -> u32 { + // SAFETY: using the PL011 MMIO address. 
+ unsafe { core::ptr::read_volatile((PL011_BASE + reg as u64) as *const u32) } +} + +fn write_register(reg: Pl011Register, val: u32) { + // SAFETY: using the PL011 MMIO address. + unsafe { + core::ptr::write_volatile((PL011_BASE + reg as u64) as *mut u32, val); + } +} + +fn cell_id() -> u32 { + // This can easily be rewritten employing + // bare arithmetic yet the compiler does a very good job + // so using the domain abstractions. + [ + Pl011Register::PCellID3, + Pl011Register::PCellID2, + Pl011Register::PCellID1, + Pl011Register::PCellID0, + ] + .iter() + .fold(0, |id_running, &r| { + id_running.wrapping_shl(8) | (read_register(r) as u8 as u32) + }) +} + +fn periph_id() -> u32 { + // This can easily be rewritten employing + // bare arithmetic yet the compiler does a very good job + // so using the domain abstractions. + [ + Pl011Register::PeriphID3, + Pl011Register::PeriphID2, + Pl011Register::PeriphID1, + Pl011Register::PeriphID0, + ] + .iter() + .fold(0, |id_running, &r| { + id_running.wrapping_shl(8) | (read_register(r) as u8 as u32) + }) +} + +fn poll_tx_not_full() { + while read_register(Pl011Register::Fr) & FR_TX_FULL != 0 { + spin_loop(); + } +} + +fn poll_not_busy() { + while read_register(Pl011Register::Fr) & FR_BUSY != 0 { + spin_loop(); + } +} + +/// Disables the functional parts of the UART, drains FIFOs, +/// sets baud rate and enables the UART in the polling mode. +/// Might be geared towards the real hardware more than the virtual one. +/// Works with qemu and Hyper-V. +fn reset_and_init() { + // Mask interrupts (lower 11 bits) + write_register(Pl011Register::Imsc, 0x7ff); + // Clear interrupts (lower 11 bits) + write_register(Pl011Register::Icr, 0x7ff); + // Disable DMA on Rx and Tx + write_register(Pl011Register::DmaCr, 0x0); + + // Leave Rx and Tx enabled to drain FIFOs. + write_register(Pl011Register::Cr, CR_RX_ENABLE | CR_TX_ENABLE); + read_register(Pl011Register::Cr); // wait + read_register(Pl011Register::Cr); // wait + poll_not_busy(); + + // Disable Rx, Tx, and UART. + write_register(Pl011Register::Cr, 0x00000000); + + // Set integer and fractional parts of the baud rate, + // hardcoded for now + write_register(Pl011Register::Fbrd, 0x00000004); + write_register(Pl011Register::Ibrd, 0x00000027); + // The UARTLCR_H, UARTIBRD, and UARTFBRD registers form the single 30-bit + // wide UARTLCR Register that is updated on a single write strobe generated by a + // UARTLCR_H write + write_register(Pl011Register::LcrHigh, LCR_H_FIFO_EN | LCR_H_8BITS); + + // Clear the errors + write_register(Pl011Register::RsrOrEcr, 0); + + // Enable Tx and Rx + write_register(Pl011Register::Cr, CR_RX_ENABLE | CR_TX_ENABLE); + read_register(Pl011Register::Cr); // wait + read_register(Pl011Register::Cr); // wait + poll_not_busy(); + + // Enable UART + write_register( + Pl011Register::Cr, + CR_RX_ENABLE | CR_TX_ENABLE | CR_UART_ENABLE, + ); + poll_not_busy(); +} + +/// A PL011 serial port. +pub struct Serial; + +static SUPPORTED: AtomicBool = AtomicBool::new(false); + +impl Serial { + /// Initializes the serial port. 
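+    ///
+    /// Probes the PrimeCell ID registers first and only resets the UART when a
+    /// known PL011 is detected; if none is found, `write_str` silently discards
+    /// all output.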
+    pub fn init() -> Serial {
+        const SUPPORTED_PL011_CELLS: &[u32] = &[0xB105_F00D];
+
+        let cell_id = cell_id();
+        let supported = SUPPORTED_PL011_CELLS.contains(&cell_id);
+        if supported {
+            reset_and_init();
+        }
+        SUPPORTED.store(supported, Ordering::Relaxed);
+
+        Self
+    }
+}
+
+impl core::fmt::Write for Serial {
+    fn write_str(&mut self, s: &str) -> core::fmt::Result {
+        if !SUPPORTED.load(Ordering::Relaxed) {
+            return Ok(());
+        }
+
+        for byte in s.bytes() {
+            poll_tx_not_full();
+            write_register(Pl011Register::Dr, byte.into());
+        }
+
+        Ok(())
+    }
+}
diff --git a/opentmk/src/arch/mod.rs b/opentmk/src/arch/mod.rs
new file mode 100644
index 0000000000..4bcf7781ee
--- /dev/null
+++ b/opentmk/src/arch/mod.rs
@@ -0,0 +1,16 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+//! Imports and re-exports architecture-specific implementations.
+
+cfg_if::cfg_if!(
+    if #[cfg(target_arch = "x86_64")] {
+        mod x86_64;
+        pub use x86_64::*;
+    } else if #[cfg(target_arch = "aarch64")] {
+        mod aarch64;
+        pub use aarch64::*;
+    } else {
+        compile_error!("target_arch is not supported");
+    }
+);
\ No newline at end of file
diff --git a/opentmk/src/arch/x86_64/hypercall.rs b/opentmk/src/arch/x86_64/hypercall.rs
new file mode 100644
index 0000000000..1337cbe38e
--- /dev/null
+++ b/opentmk/src/arch/x86_64/hypercall.rs
@@ -0,0 +1,53 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use core::ptr::addr_of;
+use hvdef::HV_PAGE_SIZE;
+use minimal_rt::arch::hypercall::HYPERCALL_PAGE;
+use minimal_rt::arch::msr::read_msr;
+use minimal_rt::arch::msr::write_msr;
+
+/// Writes an MSR to tell the hypervisor the OS ID for the boot shim.
+#[expect(unsafe_code)]
+fn report_os_id(guest_os_id: u64) {
+    // SAFETY: Using the contract established in the Hyper-V TLFS.
+    unsafe {
+        write_msr(hvdef::HV_X64_MSR_GUEST_OS_ID, guest_os_id);
+    };
+}
+
+/// Writes an MSR to tell the hypervisor where the hypercall page is.
+#[expect(unsafe_code)]
+pub fn write_hypercall_msr(enable: bool) {
+    // SAFETY: Using the contract established in the Hyper-V TLFS.
+    let hypercall_contents = hvdef::hypercall::MsrHypercallContents::from(unsafe {
+        read_msr(hvdef::HV_X64_MSR_HYPERCALL)
+    });
+
+    let hypercall_page_num = addr_of!(HYPERCALL_PAGE) as u64 / HV_PAGE_SIZE;
+
+    // Nothing to do if enabling was requested and the page is already enabled.
+    if enable && hypercall_contents.enable() {
+        return;
+    }
+    let new_hv_contents: hvdef::hypercall::MsrHypercallContents = hypercall_contents
+        .with_enable(enable)
+        .with_gpn(if enable { hypercall_page_num } else { 0 });
+
+    // SAFETY: Using the contract established in the Hyper-V TLFS.
+    unsafe { write_msr(hvdef::HV_X64_MSR_HYPERCALL, new_hv_contents.into()) };
+}
+
+/// Has to be called before using hypercalls.
+pub(crate) fn initialize(guest_os_id: u64) {
+    // We are assuming we are running under a Microsoft hypervisor, so there is
+    // no need to check any cpuid leaves.
+    report_os_id(guest_os_id);
+    write_hypercall_msr(true);
+}
+
+/// Call before jumping to kernel.
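+///
+/// Mirrors `initialize` in reverse order: the hypercall page is disabled before
+/// the guest OS ID is cleared.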
+pub(crate) fn uninitialize() { + write_hypercall_msr(false); + report_os_id(0); +} \ No newline at end of file diff --git a/opentmk/src/arch/x86_64/interrupt.rs b/opentmk/src/arch/x86_64/interrupt.rs new file mode 100644 index 0000000000..d9a6ba7993 --- /dev/null +++ b/opentmk/src/arch/x86_64/interrupt.rs @@ -0,0 +1,50 @@ + +use alloc::boxed::Box; +use alloc::sync::Arc; +use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; +use lazy_static::lazy_static; +use core::cell::{Ref, RefCell}; +use core::concat_idents; +use crate::sync::Mutex; + +use crate::{criticallog, infolog}; + +use super::interrupt_handler_register::{register_interrupt_handler, set_common_handler}; + +lazy_static! { + static ref IDT: InterruptDescriptorTable = { + let mut idt = InterruptDescriptorTable::new(); + register_interrupt_handler(&mut idt); + idt.double_fault.set_handler_fn(handler_double_fault); + idt + }; +} + +static mut HANDLERS : [fn(); 256] = [no_op; 256]; +static MUTEX: Mutex<()> = Mutex::new(()); +fn no_op() {} + +fn common_handler(stack_frame: InterruptStackFrame, interrupt: u8) { + unsafe { HANDLERS[interrupt as usize](); } +} + +pub fn set_handler(interrupt: u8, handler: fn()) { + let _lock = MUTEX.lock(); + unsafe { HANDLERS[interrupt as usize] = handler; } +} + + +extern "x86-interrupt" fn handler_double_fault( + stack_frame: InterruptStackFrame, + _error_code: u64, +) -> ! { + criticallog!("EXCEPTION:\n\tERROR_CODE: {}\n\tDOUBLE FAULT\n{:#?}", _error_code, stack_frame); + loop {} +} + +// Initialize the IDT +pub fn init() { + unsafe { IDT.load() }; + set_common_handler(common_handler); + unsafe { x86_64::instructions::interrupts::enable() }; +} \ No newline at end of file diff --git a/opentmk/src/arch/x86_64/interrupt_handler_register.rs b/opentmk/src/arch/x86_64/interrupt_handler_register.rs new file mode 100644 index 0000000000..c015edb666 --- /dev/null +++ b/opentmk/src/arch/x86_64/interrupt_handler_register.rs @@ -0,0 +1,606 @@ +use core::arch::asm; +use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; + +use crate::{infolog, sync::Mutex}; + +static mut COMMON_HANDLER: fn(InterruptStackFrame, u8) = common_handler; +static COMMON_HANDLER_MUTEX: Mutex<()> = Mutex::new(()); + +macro_rules! 
create_fn { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) { + unsafe { + asm!(r#" + push rax + push rbx + push rcx + push rdx + push rsi + push rdi + push rbp + push rsp + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + + sub rsp, 256 + movups [rsp + 16 * 0], xmm0 + movups [rsp + 16 * 1], xmm1 + movups [rsp + 16 * 2], xmm2 + movups [rsp + 16 * 3], xmm3 + movups [rsp + 16 * 4], xmm4 + movups [rsp + 16 * 5], xmm5 + movups [rsp + 16 * 6], xmm6 + movups [rsp + 16 * 7], xmm7 + movups [rsp + 16 * 8], xmm8 + movups [rsp + 16 * 9], xmm9 + movups [rsp + 16 * 10], xmm10 + movups [rsp + 16 * 11], xmm11 + movups [rsp + 16 * 12], xmm12 + movups [rsp + 16 * 13], xmm13 + movups [rsp + 16 * 14], xmm14 + movups [rsp + 16 * 15], xmm15 +"#); + +unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + +asm!(r#" + + + movups xmm0, [rsp + 16 * 0] + movups xmm1, [rsp + 16 * 1] + movups xmm2, [rsp + 16 * 2] + movups xmm3, [rsp + 16 * 3] + movups xmm4, [rsp + 16 * 4] + movups xmm5, [rsp + 16 * 5] + movups xmm6, [rsp + 16 * 6] + movups xmm7, [rsp + 16 * 7] + movups xmm8, [rsp + 16 * 8] + movups xmm9, [rsp + 16 * 9] + movups xmm10, [rsp + 16 * 10] + movups xmm11, [rsp + 16 * 11] + movups xmm12, [rsp + 16 * 12] + movups xmm13, [rsp + 16 * 13] + movups xmm14, [rsp + 16 * 14] + movups xmm15, [rsp + 16 * 15] + add rsp, 16 * 16 + + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rsp + pop rbp + pop rdi + pop rsi + pop rdx + pop rcx + pop rbx + pop rax + + "#); + } + } + }; +} + +macro_rules! register_interrupt_handler { + ($idt: expr, $i: expr, $name: ident) => { + $idt[$i].set_handler_fn($name); + }; +} + +fn common_handler(stack_frame: InterruptStackFrame, interrupt: u8) { + infolog!("Default interrupt handler fired: {}", interrupt); +} + +pub fn set_common_handler(handler: fn(InterruptStackFrame, u8)) { + let guard = COMMON_HANDLER_MUTEX.lock(); + unsafe { + COMMON_HANDLER = handler; + } +} + +extern "x86-interrupt" fn no_op(stack_frame: InterruptStackFrame) {} + +pub fn register_interrupt_handler(idt: &mut InterruptDescriptorTable) { + register_interrupt_handler!(idt, 0, handler_0); + register_interrupt_handler!(idt, 1, handler_1); + register_interrupt_handler!(idt, 2, handler_2); + register_interrupt_handler!(idt, 3, handler_3); + register_interrupt_handler!(idt, 4, handler_4); + register_interrupt_handler!(idt, 5, handler_5); + register_interrupt_handler!(idt, 6, handler_6); + register_interrupt_handler!(idt, 7, handler_7); + + register_interrupt_handler!(idt, 32, handler_32); + register_interrupt_handler!(idt, 33, handler_33); + register_interrupt_handler!(idt, 34, handler_34); + register_interrupt_handler!(idt, 35, handler_35); + register_interrupt_handler!(idt, 36, handler_36); + register_interrupt_handler!(idt, 37, handler_37); + register_interrupt_handler!(idt, 38, handler_38); + register_interrupt_handler!(idt, 39, handler_39); + register_interrupt_handler!(idt, 40, handler_40); + register_interrupt_handler!(idt, 41, handler_41); + register_interrupt_handler!(idt, 42, handler_42); + register_interrupt_handler!(idt, 43, handler_43); + register_interrupt_handler!(idt, 44, handler_44); + register_interrupt_handler!(idt, 45, handler_45); + register_interrupt_handler!(idt, 46, handler_46); + register_interrupt_handler!(idt, 47, handler_47); + register_interrupt_handler!(idt, 48, handler_48); + register_interrupt_handler!(idt, 49, handler_49); + register_interrupt_handler!(idt, 50, handler_50); + 
register_interrupt_handler!(idt, 51, handler_51); + register_interrupt_handler!(idt, 52, handler_52); + register_interrupt_handler!(idt, 53, handler_53); + register_interrupt_handler!(idt, 54, handler_54); + register_interrupt_handler!(idt, 55, handler_55); + register_interrupt_handler!(idt, 56, handler_56); + register_interrupt_handler!(idt, 57, handler_57); + register_interrupt_handler!(idt, 58, handler_58); + register_interrupt_handler!(idt, 59, handler_59); + register_interrupt_handler!(idt, 60, handler_60); + register_interrupt_handler!(idt, 61, handler_61); + register_interrupt_handler!(idt, 62, handler_62); + register_interrupt_handler!(idt, 63, handler_63); + register_interrupt_handler!(idt, 64, handler_64); + register_interrupt_handler!(idt, 65, handler_65); + register_interrupt_handler!(idt, 66, handler_66); + register_interrupt_handler!(idt, 67, handler_67); + register_interrupt_handler!(idt, 68, handler_68); + register_interrupt_handler!(idt, 69, handler_69); + register_interrupt_handler!(idt, 70, handler_70); + register_interrupt_handler!(idt, 71, handler_71); + register_interrupt_handler!(idt, 72, handler_72); + register_interrupt_handler!(idt, 73, handler_73); + register_interrupt_handler!(idt, 74, handler_74); + register_interrupt_handler!(idt, 75, handler_75); + register_interrupt_handler!(idt, 76, handler_76); + register_interrupt_handler!(idt, 77, handler_77); + register_interrupt_handler!(idt, 78, handler_78); + register_interrupt_handler!(idt, 79, handler_79); + register_interrupt_handler!(idt, 80, handler_80); + register_interrupt_handler!(idt, 81, handler_81); + register_interrupt_handler!(idt, 82, handler_82); + register_interrupt_handler!(idt, 83, handler_83); + register_interrupt_handler!(idt, 84, handler_84); + register_interrupt_handler!(idt, 85, handler_85); + register_interrupt_handler!(idt, 86, handler_86); + register_interrupt_handler!(idt, 87, handler_87); + register_interrupt_handler!(idt, 88, handler_88); + register_interrupt_handler!(idt, 89, handler_89); + register_interrupt_handler!(idt, 90, handler_90); + register_interrupt_handler!(idt, 91, handler_91); + register_interrupt_handler!(idt, 92, handler_92); + register_interrupt_handler!(idt, 93, handler_93); + register_interrupt_handler!(idt, 94, handler_94); + register_interrupt_handler!(idt, 95, handler_95); + register_interrupt_handler!(idt, 96, handler_96); + register_interrupt_handler!(idt, 97, handler_97); + register_interrupt_handler!(idt, 98, handler_98); + register_interrupt_handler!(idt, 99, handler_99); + register_interrupt_handler!(idt, 100, handler_100); + register_interrupt_handler!(idt, 101, handler_101); + register_interrupt_handler!(idt, 102, handler_102); + register_interrupt_handler!(idt, 103, handler_103); + register_interrupt_handler!(idt, 104, handler_104); + register_interrupt_handler!(idt, 105, handler_105); + register_interrupt_handler!(idt, 106, handler_106); + register_interrupt_handler!(idt, 107, handler_107); + register_interrupt_handler!(idt, 108, handler_108); + register_interrupt_handler!(idt, 109, handler_109); + register_interrupt_handler!(idt, 110, handler_110); + register_interrupt_handler!(idt, 111, handler_111); + register_interrupt_handler!(idt, 112, handler_112); + register_interrupt_handler!(idt, 113, handler_113); + register_interrupt_handler!(idt, 114, handler_114); + register_interrupt_handler!(idt, 115, handler_115); + register_interrupt_handler!(idt, 116, handler_116); + register_interrupt_handler!(idt, 117, handler_117); + register_interrupt_handler!(idt, 
118, handler_118); + register_interrupt_handler!(idt, 119, handler_119); + register_interrupt_handler!(idt, 120, handler_120); + register_interrupt_handler!(idt, 121, handler_121); + register_interrupt_handler!(idt, 122, handler_122); + register_interrupt_handler!(idt, 123, handler_123); + register_interrupt_handler!(idt, 124, handler_124); + register_interrupt_handler!(idt, 125, handler_125); + register_interrupt_handler!(idt, 126, handler_126); + register_interrupt_handler!(idt, 127, handler_127); + register_interrupt_handler!(idt, 128, handler_128); + register_interrupt_handler!(idt, 129, handler_129); + register_interrupt_handler!(idt, 130, handler_130); + register_interrupt_handler!(idt, 131, handler_131); + register_interrupt_handler!(idt, 132, handler_132); + register_interrupt_handler!(idt, 133, handler_133); + register_interrupt_handler!(idt, 134, handler_134); + register_interrupt_handler!(idt, 135, handler_135); + register_interrupt_handler!(idt, 136, handler_136); + register_interrupt_handler!(idt, 137, handler_137); + register_interrupt_handler!(idt, 138, handler_138); + register_interrupt_handler!(idt, 139, handler_139); + register_interrupt_handler!(idt, 140, handler_140); + register_interrupt_handler!(idt, 141, handler_141); + register_interrupt_handler!(idt, 142, handler_142); + register_interrupt_handler!(idt, 143, handler_143); + register_interrupt_handler!(idt, 144, handler_144); + register_interrupt_handler!(idt, 145, handler_145); + register_interrupt_handler!(idt, 146, handler_146); + register_interrupt_handler!(idt, 147, handler_147); + register_interrupt_handler!(idt, 148, handler_148); + register_interrupt_handler!(idt, 149, handler_149); + register_interrupt_handler!(idt, 150, handler_150); + register_interrupt_handler!(idt, 151, handler_151); + register_interrupt_handler!(idt, 152, handler_152); + register_interrupt_handler!(idt, 153, handler_153); + register_interrupt_handler!(idt, 154, handler_154); + register_interrupt_handler!(idt, 155, handler_155); + register_interrupt_handler!(idt, 156, handler_156); + register_interrupt_handler!(idt, 157, handler_157); + register_interrupt_handler!(idt, 158, handler_158); + register_interrupt_handler!(idt, 159, handler_159); + register_interrupt_handler!(idt, 160, handler_160); + register_interrupt_handler!(idt, 161, handler_161); + register_interrupt_handler!(idt, 162, handler_162); + register_interrupt_handler!(idt, 163, handler_163); + register_interrupt_handler!(idt, 164, handler_164); + register_interrupt_handler!(idt, 165, handler_165); + register_interrupt_handler!(idt, 166, handler_166); + register_interrupt_handler!(idt, 167, handler_167); + register_interrupt_handler!(idt, 168, handler_168); + register_interrupt_handler!(idt, 169, handler_169); + register_interrupt_handler!(idt, 170, handler_170); + register_interrupt_handler!(idt, 171, handler_171); + register_interrupt_handler!(idt, 172, handler_172); + register_interrupt_handler!(idt, 173, handler_173); + register_interrupt_handler!(idt, 174, handler_174); + register_interrupt_handler!(idt, 175, handler_175); + register_interrupt_handler!(idt, 176, handler_176); + register_interrupt_handler!(idt, 177, handler_177); + register_interrupt_handler!(idt, 178, handler_178); + register_interrupt_handler!(idt, 179, handler_179); + register_interrupt_handler!(idt, 180, handler_180); + register_interrupt_handler!(idt, 181, handler_181); + register_interrupt_handler!(idt, 182, handler_182); + register_interrupt_handler!(idt, 183, handler_183); + 
register_interrupt_handler!(idt, 184, handler_184); + register_interrupt_handler!(idt, 185, handler_185); + register_interrupt_handler!(idt, 186, handler_186); + register_interrupt_handler!(idt, 187, handler_187); + register_interrupt_handler!(idt, 188, handler_188); + register_interrupt_handler!(idt, 189, handler_189); + register_interrupt_handler!(idt, 190, handler_190); + register_interrupt_handler!(idt, 191, handler_191); + register_interrupt_handler!(idt, 192, handler_192); + register_interrupt_handler!(idt, 193, handler_193); + register_interrupt_handler!(idt, 194, handler_194); + register_interrupt_handler!(idt, 195, handler_195); + register_interrupt_handler!(idt, 196, handler_196); + register_interrupt_handler!(idt, 197, handler_197); + register_interrupt_handler!(idt, 198, handler_198); + register_interrupt_handler!(idt, 199, handler_199); + register_interrupt_handler!(idt, 200, handler_200); + register_interrupt_handler!(idt, 201, handler_201); + register_interrupt_handler!(idt, 202, handler_202); + register_interrupt_handler!(idt, 203, handler_203); + register_interrupt_handler!(idt, 204, handler_204); + register_interrupt_handler!(idt, 205, handler_205); + register_interrupt_handler!(idt, 206, handler_206); + register_interrupt_handler!(idt, 207, handler_207); + register_interrupt_handler!(idt, 208, handler_208); + register_interrupt_handler!(idt, 209, handler_209); + register_interrupt_handler!(idt, 210, handler_210); + register_interrupt_handler!(idt, 211, handler_211); + register_interrupt_handler!(idt, 212, handler_212); + register_interrupt_handler!(idt, 213, handler_213); + register_interrupt_handler!(idt, 214, handler_214); + register_interrupt_handler!(idt, 215, handler_215); + register_interrupt_handler!(idt, 216, handler_216); + register_interrupt_handler!(idt, 217, handler_217); + register_interrupt_handler!(idt, 218, handler_218); + register_interrupt_handler!(idt, 219, handler_219); + register_interrupt_handler!(idt, 220, handler_220); + register_interrupt_handler!(idt, 221, handler_221); + register_interrupt_handler!(idt, 222, handler_222); + register_interrupt_handler!(idt, 223, handler_223); + register_interrupt_handler!(idt, 224, handler_224); + register_interrupt_handler!(idt, 225, handler_225); + register_interrupt_handler!(idt, 226, handler_226); + register_interrupt_handler!(idt, 227, handler_227); + register_interrupt_handler!(idt, 228, handler_228); + register_interrupt_handler!(idt, 229, handler_229); + register_interrupt_handler!(idt, 230, handler_230); + register_interrupt_handler!(idt, 231, handler_231); + register_interrupt_handler!(idt, 232, handler_232); + register_interrupt_handler!(idt, 233, handler_233); + register_interrupt_handler!(idt, 234, handler_234); + register_interrupt_handler!(idt, 235, handler_235); + register_interrupt_handler!(idt, 236, handler_236); + register_interrupt_handler!(idt, 237, handler_237); + register_interrupt_handler!(idt, 238, handler_238); + register_interrupt_handler!(idt, 239, handler_239); + register_interrupt_handler!(idt, 240, handler_240); + register_interrupt_handler!(idt, 241, handler_241); + register_interrupt_handler!(idt, 242, handler_242); + register_interrupt_handler!(idt, 243, handler_243); + register_interrupt_handler!(idt, 244, handler_244); + register_interrupt_handler!(idt, 245, handler_245); + register_interrupt_handler!(idt, 246, handler_246); + register_interrupt_handler!(idt, 247, handler_247); + register_interrupt_handler!(idt, 248, handler_248); + register_interrupt_handler!(idt, 249, 
handler_249); + register_interrupt_handler!(idt, 250, handler_250); + register_interrupt_handler!(idt, 251, handler_251); + register_interrupt_handler!(idt, 252, handler_252); + register_interrupt_handler!(idt, 253, handler_253); + register_interrupt_handler!(idt, 254, handler_254); + register_interrupt_handler!(idt, 255, handler_255); +} + +create_fn!(handler_0, 0); +create_fn!(handler_1, 1); +create_fn!(handler_2, 2); +create_fn!(handler_3, 3); +create_fn!(handler_4, 4); +create_fn!(handler_5, 5); +create_fn!(handler_6, 6); +create_fn!(handler_7, 7); +create_fn!(handler_8, 8); +create_fn!(handler_9, 9); +create_fn!(handler_10, 10); +create_fn!(handler_11, 11); +create_fn!(handler_12, 12); +create_fn!(handler_13, 13); +create_fn!(handler_14, 14); +create_fn!(handler_15, 15); +create_fn!(handler_16, 16); +create_fn!(handler_17, 17); +create_fn!(handler_18, 18); +create_fn!(handler_19, 19); +create_fn!(handler_20, 20); +create_fn!(handler_21, 21); +create_fn!(handler_22, 22); +create_fn!(handler_23, 23); +create_fn!(handler_24, 24); +create_fn!(handler_25, 25); +create_fn!(handler_26, 26); +create_fn!(handler_27, 27); +create_fn!(handler_28, 28); +create_fn!(handler_29, 29); +create_fn!(handler_30, 30); +create_fn!(handler_31, 31); +create_fn!(handler_32, 32); +create_fn!(handler_33, 33); +create_fn!(handler_34, 34); +create_fn!(handler_35, 35); +create_fn!(handler_36, 36); +create_fn!(handler_37, 37); +create_fn!(handler_38, 38); +create_fn!(handler_39, 39); +create_fn!(handler_40, 40); +create_fn!(handler_41, 41); +create_fn!(handler_42, 42); +create_fn!(handler_43, 43); +create_fn!(handler_44, 44); +create_fn!(handler_45, 45); +create_fn!(handler_46, 46); +create_fn!(handler_47, 47); +create_fn!(handler_48, 48); +create_fn!(handler_49, 49); +create_fn!(handler_50, 50); +create_fn!(handler_51, 51); +create_fn!(handler_52, 52); +create_fn!(handler_53, 53); +create_fn!(handler_54, 54); +create_fn!(handler_55, 55); +create_fn!(handler_56, 56); +create_fn!(handler_57, 57); +create_fn!(handler_58, 58); +create_fn!(handler_59, 59); +create_fn!(handler_60, 60); +create_fn!(handler_61, 61); +create_fn!(handler_62, 62); +create_fn!(handler_63, 63); +create_fn!(handler_64, 64); +create_fn!(handler_65, 65); +create_fn!(handler_66, 66); +create_fn!(handler_67, 67); +create_fn!(handler_68, 68); +create_fn!(handler_69, 69); +create_fn!(handler_70, 70); +create_fn!(handler_71, 71); +create_fn!(handler_72, 72); +create_fn!(handler_73, 73); +create_fn!(handler_74, 74); +create_fn!(handler_75, 75); +create_fn!(handler_76, 76); +create_fn!(handler_77, 77); +create_fn!(handler_78, 78); +create_fn!(handler_79, 79); +create_fn!(handler_80, 80); +create_fn!(handler_81, 81); +create_fn!(handler_82, 82); +create_fn!(handler_83, 83); +create_fn!(handler_84, 84); +create_fn!(handler_85, 85); +create_fn!(handler_86, 86); +create_fn!(handler_87, 87); +create_fn!(handler_88, 88); +create_fn!(handler_89, 89); +create_fn!(handler_90, 90); +create_fn!(handler_91, 91); +create_fn!(handler_92, 92); +create_fn!(handler_93, 93); +create_fn!(handler_94, 94); +create_fn!(handler_95, 95); +create_fn!(handler_96, 96); +create_fn!(handler_97, 97); +create_fn!(handler_98, 98); +create_fn!(handler_99, 99); +create_fn!(handler_100, 100); +create_fn!(handler_101, 101); +create_fn!(handler_102, 102); +create_fn!(handler_103, 103); +create_fn!(handler_104, 104); +create_fn!(handler_105, 105); +create_fn!(handler_106, 106); +create_fn!(handler_107, 107); +create_fn!(handler_108, 108); +create_fn!(handler_109, 109); 
+create_fn!(handler_110, 110); +create_fn!(handler_111, 111); +create_fn!(handler_112, 112); +create_fn!(handler_113, 113); +create_fn!(handler_114, 114); +create_fn!(handler_115, 115); +create_fn!(handler_116, 116); +create_fn!(handler_117, 117); +create_fn!(handler_118, 118); +create_fn!(handler_119, 119); +create_fn!(handler_120, 120); +create_fn!(handler_121, 121); +create_fn!(handler_122, 122); +create_fn!(handler_123, 123); +create_fn!(handler_124, 124); +create_fn!(handler_125, 125); +create_fn!(handler_126, 126); +create_fn!(handler_127, 127); +create_fn!(handler_128, 128); +create_fn!(handler_129, 129); +create_fn!(handler_130, 130); +create_fn!(handler_131, 131); +create_fn!(handler_132, 132); +create_fn!(handler_133, 133); +create_fn!(handler_134, 134); +create_fn!(handler_135, 135); +create_fn!(handler_136, 136); +create_fn!(handler_137, 137); +create_fn!(handler_138, 138); +create_fn!(handler_139, 139); +create_fn!(handler_140, 140); +create_fn!(handler_141, 141); +create_fn!(handler_142, 142); +create_fn!(handler_143, 143); +create_fn!(handler_144, 144); +create_fn!(handler_145, 145); +create_fn!(handler_146, 146); +create_fn!(handler_147, 147); +create_fn!(handler_148, 148); +create_fn!(handler_149, 149); +create_fn!(handler_150, 150); +create_fn!(handler_151, 151); +create_fn!(handler_152, 152); +create_fn!(handler_153, 153); +create_fn!(handler_154, 154); +create_fn!(handler_155, 155); +create_fn!(handler_156, 156); +create_fn!(handler_157, 157); +create_fn!(handler_158, 158); +create_fn!(handler_159, 159); +create_fn!(handler_160, 160); +create_fn!(handler_161, 161); +create_fn!(handler_162, 162); +create_fn!(handler_163, 163); +create_fn!(handler_164, 164); +create_fn!(handler_165, 165); +create_fn!(handler_166, 166); +create_fn!(handler_167, 167); +create_fn!(handler_168, 168); +create_fn!(handler_169, 169); +create_fn!(handler_170, 170); +create_fn!(handler_171, 171); +create_fn!(handler_172, 172); +create_fn!(handler_173, 173); +create_fn!(handler_174, 174); +create_fn!(handler_175, 175); +create_fn!(handler_176, 176); +create_fn!(handler_177, 177); +create_fn!(handler_178, 178); +create_fn!(handler_179, 179); +create_fn!(handler_180, 180); +create_fn!(handler_181, 181); +create_fn!(handler_182, 182); +create_fn!(handler_183, 183); +create_fn!(handler_184, 184); +create_fn!(handler_185, 185); +create_fn!(handler_186, 186); +create_fn!(handler_187, 187); +create_fn!(handler_188, 188); +create_fn!(handler_189, 189); +create_fn!(handler_190, 190); +create_fn!(handler_191, 191); +create_fn!(handler_192, 192); +create_fn!(handler_193, 193); +create_fn!(handler_194, 194); +create_fn!(handler_195, 195); +create_fn!(handler_196, 196); +create_fn!(handler_197, 197); +create_fn!(handler_198, 198); +create_fn!(handler_199, 199); +create_fn!(handler_200, 200); +create_fn!(handler_201, 201); +create_fn!(handler_202, 202); +create_fn!(handler_203, 203); +create_fn!(handler_204, 204); +create_fn!(handler_205, 205); +create_fn!(handler_206, 206); +create_fn!(handler_207, 207); +create_fn!(handler_208, 208); +create_fn!(handler_209, 209); +create_fn!(handler_210, 210); +create_fn!(handler_211, 211); +create_fn!(handler_212, 212); +create_fn!(handler_213, 213); +create_fn!(handler_214, 214); +create_fn!(handler_215, 215); +create_fn!(handler_216, 216); +create_fn!(handler_217, 217); +create_fn!(handler_218, 218); +create_fn!(handler_219, 219); +create_fn!(handler_220, 220); +create_fn!(handler_221, 221); +create_fn!(handler_222, 222); +create_fn!(handler_223, 223); 
+create_fn!(handler_224, 224); +create_fn!(handler_225, 225); +create_fn!(handler_226, 226); +create_fn!(handler_227, 227); +create_fn!(handler_228, 228); +create_fn!(handler_229, 229); +create_fn!(handler_230, 230); +create_fn!(handler_231, 231); +create_fn!(handler_232, 232); +create_fn!(handler_233, 233); +create_fn!(handler_234, 234); +create_fn!(handler_235, 235); +create_fn!(handler_236, 236); +create_fn!(handler_237, 237); +create_fn!(handler_238, 238); +create_fn!(handler_239, 239); +create_fn!(handler_240, 240); +create_fn!(handler_241, 241); +create_fn!(handler_242, 242); +create_fn!(handler_243, 243); +create_fn!(handler_244, 244); +create_fn!(handler_245, 245); +create_fn!(handler_246, 246); +create_fn!(handler_247, 247); +create_fn!(handler_248, 248); +create_fn!(handler_249, 249); +create_fn!(handler_250, 250); +create_fn!(handler_251, 251); +create_fn!(handler_252, 252); +create_fn!(handler_253, 253); +create_fn!(handler_254, 254); +create_fn!(handler_255, 255); diff --git a/opentmk/src/arch/x86_64/mod.rs b/opentmk/src/arch/x86_64/mod.rs new file mode 100644 index 0000000000..81cead476e --- /dev/null +++ b/opentmk/src/arch/x86_64/mod.rs @@ -0,0 +1,4 @@ +pub mod hypercall; +pub mod serial; +pub mod interrupt; +mod interrupt_handler_register; \ No newline at end of file diff --git a/opentmk/src/arch/x86_64/serial.rs b/opentmk/src/arch/x86_64/serial.rs new file mode 100644 index 0000000000..250fbd66cf --- /dev/null +++ b/opentmk/src/arch/x86_64/serial.rs @@ -0,0 +1,125 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Serial output for debugging. + +use core::arch::asm; +use core::fmt; +use core::sync::atomic::AtomicBool; +use crate::sync::Mutex; + +const COM4: u16 = 0x2E8; +static mut MUTEX : Mutex<()> = Mutex::new(()); + +/// Write a byte to a port. +/// +/// # Safety +/// +/// The caller must be sure that the given port is safe to write to, and that the +/// given value is safe for it. +unsafe fn outb(port: u16, data: u8) { + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "out dx, al", + in("dx") port, + in("al") data, + } + } +} + +/// Read a byte from a port. +/// +/// # Safety +/// +/// The caller must be sure that the given port is safe to read from. +unsafe fn inb(port: u16) -> u8 { + let mut data; + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "in al, dx", + in("dx") port, + out("al") data, + } + } + data +} + +/// A trait to access io ports used by the serial device. +pub trait IoAccess { + /// Issue an in byte instruction. + /// + /// # Safety + /// + /// The caller must be sure that the given port is safe to read from. + unsafe fn inb(&self, port: u16) -> u8; + /// Issue an out byte instruction. + /// + /// # Safety + /// + /// The caller must be sure that the given port is safe to write to, and that the + /// given value is safe for it. + unsafe fn outb(&self, port: u16, data: u8); +} + +/// A struct to access io ports using in/out instructions. +pub struct InstrIoAccess; + +impl IoAccess for InstrIoAccess { + unsafe fn inb(&self, port: u16) -> u8 { + // SAFETY: The serial port caller has specified a valid port. + unsafe { inb(port) } + } + + unsafe fn outb(&self, port: u16, data: u8) { + // SAFETY: The serial port caller has specified a valid port and data. + unsafe { outb(port, data) } + } +} + +/// A writer for the COM3 UART. +pub struct Serial { + io: T, +} + +impl Serial { + /// Initialize the serial port. 
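+    ///
+    /// A minimal usage sketch (hypothetical standalone caller; the `slog` module
+    /// instead keeps a `static` built with the const `new` constructor):
+    ///
+    /// ```ignore
+    /// use core::fmt::Write;
+    /// let mut serial = Serial::init(InstrIoAccess {});
+    /// let _ = writeln!(serial, "hello from the guest");
+    /// ```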
+ pub fn init(io: T) -> Self { + // SAFETY: Writing these values to the serial device is safe. + unsafe { + io.outb(COM4 + 1, 0x00); // Disable all interrupts + io.outb(COM4 + 2, 0xC7); // Enable FIFO, clear them, with 14-byte threshold + io.outb(COM4 + 4, 0x0F); + } + + Self { io } + } + + /// Create an instance without calling init. + pub const fn new(io: T) -> Self { + Self { io } + } + + fn write_byte(&self, b: u8) { + // SAFETY: Reading and writing text to the serial device is safe. + unsafe { + while self.io.inb(COM4 + 5) & 0x20 == 0 {} + self.io.outb(COM4, b); + } + } +} + + +impl fmt::Write for Serial { + fn write_str(&mut self, s: &str) -> fmt::Result { + let _guard = unsafe { MUTEX.lock() }; + for &b in s.as_bytes() { + if b == b'\n' { + self.write_byte(b'\r'); + } + self.write_byte(b); + } + Ok(()) + } +} diff --git a/opentmk/src/main.rs b/opentmk/src/main.rs new file mode 100644 index 0000000000..019f594f4d --- /dev/null +++ b/opentmk/src/main.rs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +#![allow(warnings)] +#![no_std] +#![allow(unsafe_code)] +#![feature(naked_functions)] +#![feature(abi_x86_interrupt)] +#![feature(concat_idents)] + +#![doc = include_str!("../README.md")] +// HACK: workaround for building guest_test_uefi as part of the workspace in CI. +#![cfg_attr(all(not(test), target_os = "uefi"), no_main)] +#![cfg_attr(all(not(test), target_os = "uefi"), no_std)] + +// HACK: workaround for building guest_test_uefi as part of the workspace in CI +// +// Actual entrypoint is `uefi::uefi_main`, via the `#[entry]` macro +#[cfg(any(test, not(target_os = "uefi")))] +fn main() {} + +#[macro_use] +extern crate alloc; + +mod uefi; +pub mod arch; +pub mod slog; +pub mod sync; \ No newline at end of file diff --git a/opentmk/src/slog.rs b/opentmk/src/slog.rs new file mode 100644 index 0000000000..2073788370 --- /dev/null +++ b/opentmk/src/slog.rs @@ -0,0 +1,242 @@ +#![feature(panic_location)] + +use core::any::type_name; +use core::fmt::Write; +use core::result; + +use crate::arch::serial::{InstrIoAccess, Serial}; +use crate::sync::Mutex; +use alloc::string::{String, ToString}; +#[no_std] +use serde_json::json; +use serde::Serialize; +pub enum Level { + DEBUG = 0, + INFO = 1, + WARNING = 2, + ERROR = 3, + CRITICAL = 4, +} + +pub fn get_json_string(s: &String, terminate_new_line: bool, level: Level) -> String { + let out = json!({ + "type:": "log", + "message": s, + "level": match level { + Level::DEBUG => "DEBUG", + Level::INFO => "INFO", + Level::WARNING => "WARNING", + Level::ERROR => "ERROR", + Level::CRITICAL => "CRITICAL", + } + }); + let mut out = out.to_string(); + if terminate_new_line { + out.push('\n'); + } + return out; +} + +pub fn get_json_test_assertion_string( + s: &str, + terminate_new_line: bool, + line: String, + assert_result: bool, + testname: &T, +) -> String where T: Serialize { + let out = json!({ + "type:": "assertion", + "message": s, + "level": "CRITICAL", + "line": line, + "assertion_result": assert_result, + "testname": testname, + }); + let mut out = out.to_string(); + if terminate_new_line { + out.push('\n'); + } + return out; +} + +pub static mut SERIAL: Serial = Serial::new(InstrIoAccess {}); + +#[macro_export] +macro_rules! 
tmk_assert { + ($condition:expr, $message:expr) => {{ + use core::fmt::Write; + let file = core::file!(); + let line = line!(); + let file_line = format!("{}:{}", file, line); + let expn = stringify!($condition); + let result: bool = $condition; + let js = + crate::slog::get_json_test_assertion_string(&expn, true, file_line, result, &$message); + unsafe { crate::slog::SERIAL.write_str(&js) }; + if !result { + panic!("Assertion failed: {}", $message); + } + }}; +} + +#[macro_export] +macro_rules! logt { + ($($arg:tt)*) => { + { + use core::fmt::Write; + let message = format!($($arg)*); + let js = crate::slog::get_json_string(&message, true, crate::slog::Level::INFO); + unsafe { crate::slog::SERIAL.write_str(&js) }; + } + }; +} + +#[macro_export] +macro_rules! errorlog { + ($($arg:tt)*) => { + { + use core::fmt::Write; + let message = format!($($arg)*); + let js = crate::slog::get_json_string(&message, true, crate::slog::Level::ERROR); + unsafe { crate::slog::SERIAL.write_str(&js) }; + } + }; +} + +#[macro_export] +macro_rules! debuglog { + ($($arg:tt)*) => { + { + use core::fmt::Write; + + let message = format!($($arg)*); + let js = crate::slog::get_json_string(&message, true, crate::slog::Level::DEBUG); + unsafe { crate::slog::SERIAL.write_str(&js) }; + } + }; +} + +#[macro_export] +macro_rules! infolog { + ($($arg:tt)*) => { + { + use core::fmt::Write; + + let message = format!($($arg)*); + let js = crate::slog::get_json_string(&message, true, crate::slog::Level::INFO); + unsafe { crate::slog::SERIAL.write_str(&js) }; + } + }; +} + +#[macro_export] +macro_rules! warninglog { + ($($arg:tt)*) => { + { + use core::fmt::Write; + + let message = format!($($arg)*); + let js = crate::slog::get_json_string(&message, true, crate::slog::Level::WARNING); + unsafe { crate::slog::SERIAL.write_str(&js) }; + } + }; +} + +#[macro_export] +macro_rules! criticallog { + ($($arg:tt)*) => { + { + use core::fmt::Write; + + let message = format!($($arg)*); + let js = crate::slog::get_json_string(&message, true, crate::slog::Level::CRITICAL); + unsafe { crate::slog::SERIAL.write_str(&js) }; + } + }; +} + +#[macro_export] +macro_rules! 
slog { + + ($serial:expr, $($arg:tt)*) => { + let mut serial : &mut Mutex> = &mut $serial; + let message = format!($($arg)*); + let js = slog::get_json_string(&message, true, crate::slog::Level::INFO); + { + let mut serial = serial.lock(); + serial.write_str(&js); + } + }; + +} + +pub trait AssertResult { + fn unpack_assert(self) -> T; + fn expect_assert(self, message: &str) -> T; +} + +pub trait AssertOption { + fn expect_assert(self, message: &str) -> T; +} + +impl AssertOption for Option { + fn expect_assert(self, message: &str) -> T { + match self { + Some(value) => value, + None => { + let call: &core::panic::Location<'_> = core::panic::Location::caller(); + let file_line = format!("{}:{}", call.file(), call.line()); + let expn = type_name::>(); + let js = crate::slog::get_json_test_assertion_string( + expn, true, file_line, false, &message, + ); + unsafe { crate::slog::SERIAL.write_str(&js) }; + panic!("Assertion failed: {}", message); + } + } + } +} + +impl AssertResult for Result +where + E: core::fmt::Debug, +{ + fn unpack_assert(self) -> T { + match self { + Ok(value) => value, + Err(err) => { + let call: &core::panic::Location<'_> = core::panic::Location::caller(); + let file_line = format!("{}:{}", call.file(), call.line()); + let expn = type_name::>(); + let js = crate::slog::get_json_test_assertion_string( + expn, + true, + file_line, + false, + &"ResultTest", + ); + unsafe { crate::slog::SERIAL.write_str(&js) }; + panic!("Assertion failed: {:?}", err); + } + } + } + fn expect_assert(self, message: &str) -> T { + match self { + Ok(value) => { + infolog!("result is ok, condition not met for: {}", message); + value + } + Err(err) => { + let call: &core::panic::Location<'_> = core::panic::Location::caller(); + let file_line = format!("{}:{}", call.file(), call.line()); + let expn = type_name::>(); + let js = crate::slog::get_json_test_assertion_string( + expn, true, file_line, false, &message, + ); + unsafe { crate::slog::SERIAL.write_str(&js) }; + + panic!("Assertion failed: {:?}", err); + } + } + } +} diff --git a/opentmk/src/sync.rs b/opentmk/src/sync.rs new file mode 100644 index 0000000000..a26233bc76 --- /dev/null +++ b/opentmk/src/sync.rs @@ -0,0 +1,490 @@ +use core::{arch::asm, cell::{RefCell, UnsafeCell}, fmt::Error, sync::atomic::{AtomicBool, AtomicUsize, Ordering}}; +pub use spin::Mutex; +use alloc::{boxed::Box, string::{String, ToString}, sync::Arc, vec::Vec}; +use alloc::collections::VecDeque; + +use crate::infolog; + +// pub struct LazyLock { +// lock: AtomicBool, +// init: fn() -> T, +// val: Option>, +// } + +// impl LazyLock { +// pub fn new(init: fn() -> T) -> Self { +// LazyLock { +// lock: AtomicBool::new(false), +// init, +// val: None, +// } +// } + +// pub fn get(&mut self) -> &T { +// if let ok = self.lock.get_mut() { +// if *ok { +// self.val = Some(RefCell::new((self.init)())); + +// } +// } +// if let Some(ref val) = self.val { +// return &val.borrow(); +// } +// panic!("LazyLock not initialized"); +// } + +// pub fn get_mut(&mut self) -> &mut T { +// if let ok = self.lock.get_mut() { +// if ok { +// self.val = Some((self.init)()); +// } +// } +// &mut self.val.unwrap() +// } +// } + +// pub struct Mutex { +// lock: AtomicBool, +// data: UnsafeCell, +// } + +// unsafe impl Sync for Mutex {} + +// impl Mutex { +// pub const fn new(data: T) -> Self { +// Mutex { +// lock: AtomicBool::new(false), +// data: UnsafeCell::new(data), +// } +// } + +// pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> { +// while self.lock.compare_exchange(false, true, 
Ordering::Acquire, Ordering::Relaxed).is_err() { +// // Busy-wait until the lock is acquired +// core::hint::spin_loop(); +// } +// MutexGuard { mutex: self } +// } + +// pub fn unlock(&self) { +// self.lock.store(false, Ordering::Release); +// } +// } + +// pub struct MutexGuard<'a, T> { +// mutex: &'a Mutex, +// } + +// impl<'a, T> Drop for MutexGuard<'a, T> { +// fn drop(&mut self) { +// self.mutex.unlock(); +// } +// } + +// impl<'a, T> core::ops::Deref for MutexGuard<'a, T> { +// type Target = T; + +// fn deref(&self) -> &Self::Target { +// unsafe { &*self.mutex.data.get() } +// } +// } + +// impl<'a, T> core::ops::DerefMut for MutexGuard<'a, T> { +// fn deref_mut(&mut self) -> &mut Self::Target { +// unsafe { &mut *self.mutex.data.get() } +// } +// } + +#[derive(Debug)] +pub struct RingBuffer { + buffer: Vec>, + capacity: usize, + head: usize, + tail: usize, + size: usize, +} + +impl RingBuffer { + pub fn new(capacity: usize) -> Self { + RingBuffer { + buffer: Vec::with_capacity(capacity), + capacity, + head: 0, + tail: 0, + size: 0, + } + } + + fn is_empty(&self) -> bool { + self.size == 0 + } + + fn is_full(&self) -> bool { + self.size == self.capacity + } + + pub fn push(&mut self, item: T) -> Result<(), String> { + if self.is_full() { + return Err("Buffer is full".to_string()); + } + + if self.tail == self.buffer.len() { + self.buffer.push(Some(item)); + } else { + self.buffer[self.tail] = Some(item); + } + + self.tail = (self.tail + 1) % self.capacity; + self.size += 1; + + Ok(()) + } + + pub fn pop(&mut self) -> Option { + if self.is_empty() { + return None; + } + + let item = core::mem::replace(&mut self.buffer[self.head], None); + self.head = (self.head + 1) % self.capacity; + self.size -= 1; + + Some(item.unwrap()) + } + + pub fn len(&self) -> usize { + self.size + } +} + + + + +#[cfg(feature = "std")] +use std::error::Error; +use core::fmt; + +/// An unbounded channel implementation with priority send capability. +/// This implementation works in no_std environments using spin-rs. +/// It uses a VecDeque as the underlying buffer and ensures non-blocking operations. 
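+///
+/// A minimal usage sketch (hypothetical, not taken from this crate's tests; it
+/// assumes only the `new`/`split`/`send` API defined below):
+///
+/// ```ignore
+/// let (tx, _rx) = Channel::new().split();
+/// tx.send(42u32).expect("receiver should still be alive");
+/// ```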
+pub struct Channel { + inner: Arc>, +} + +/// The inner data structure holding the channel state +struct ChannelInner { + /// The internal buffer using a VecDeque protected by its own mutex + buffer: Mutex>, + + /// Number of active senders + senders: AtomicUsize, + + /// Number of active receivers + receivers: AtomicUsize, +} + +unsafe impl Send for ChannelInner {} +unsafe impl Sync for ChannelInner {} + +/// Error type for sending operations +#[derive(Debug, Eq, PartialEq)] +pub enum SendError { + /// All receivers have been dropped + Disconnected(T), +} + +impl fmt::Display for SendError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SendError::Disconnected(_) => write!(f, "send failed because receiver is disconnected"), + } + } +} + +#[cfg(feature = "std")] +impl Error for SendError {} + +/// Error type for receiving operations +#[derive(Debug, Eq, PartialEq)] +pub enum RecvError { + /// Channel is empty + Empty, + /// All senders have been dropped + Disconnected, +} + +impl fmt::Display for RecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RecvError::Empty => write!(f, "receive failed because channel is empty"), + RecvError::Disconnected => write!(f, "receive failed because sender is disconnected"), + } + } +} + +#[cfg(feature = "std")] +impl Error for RecvError {} + +/// Sender half of the channel +pub struct Sender { + inner: Arc>, +} + +/// Receiver half of the channel +pub struct Receiver { + inner: Arc>, +} + +// implement clone for Sender +impl Clone for Sender { + fn clone(&self) -> Self { + self.inner.senders.fetch_add(1, Ordering::SeqCst); + Sender { + inner: self.inner.clone(), + } + } +} + +// implement clone for Receiver +impl Clone for Receiver { + fn clone(&self) -> Self { + self.inner.receivers.fetch_add(1, Ordering::SeqCst); + Receiver { + inner: self.inner.clone(), + } + } +} + +impl Channel { + /// Creates a new unbounded channel + pub fn new() -> Self { + let inner = Arc::new(ChannelInner { + buffer: Mutex::new(VecDeque::new()), + senders: AtomicUsize::new(1), // Start with one sender + receivers: AtomicUsize::new(1), // Start with one receiver + }); + + Self { inner } + } + + /// Splits the channel into a sender and receiver pair + pub fn split(self) -> (Sender, Receiver) { + let sender = Sender { + inner: self.inner.clone(), + }; + + let receiver = Receiver { + inner: self.inner, + }; + + (sender, receiver) + } + + /// Returns the current number of elements in the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } +} + +impl Sender { + /// Sends an element to the back of the queue + /// Returns Ok(()) if successful, Err(SendError) if all receivers have been dropped + pub fn send(&self, value: T) -> Result<(), SendError> { + // Check if there are any receivers left + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return Err(SendError::Disconnected(value)); + } + + // Lock the buffer - only locked during the actual send operation + let mut buffer = self.inner.buffer.lock(); + + // Check again after locking + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return Err(SendError::Disconnected(value)); + } + + // Push to the back of the queue - can't fail since we're unbounded + buffer.push_back(value); + + Ok(()) + } + + /// Sends an element to the front of the queue (highest priority) + /// Returns Ok(()) if successful, Err(SendError) 
if all receivers have been dropped + pub fn send_priority(&self, value: T) -> Result<(), SendError> { + // Check if there are any receivers left + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return Err(SendError::Disconnected(value)); + } + + // Lock the buffer - only locked during the actual send operation + let mut buffer = self.inner.buffer.lock(); + + // Check again after locking + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return Err(SendError::Disconnected(value)); + } + + // Push to the front of the queue - can't fail since we're unbounded + buffer.push_front(value); + + Ok(()) + } + + /// Send a batch of elements at once + /// Returns the number of elements successfully sent (all of them, unless disconnected) + pub fn send_batch(&self, items: I) -> usize + where + I: IntoIterator, + { + // Check if there are any receivers left + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return 0; + } + + // Lock the buffer once for the entire batch + let mut buffer = self.inner.buffer.lock(); + + // Check again after locking + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return 0; + } + + let mut count = 0; + + // Push each item to the back of the queue + for item in items { + buffer.push_back(item); + count += 1; + } + + count + } + + /// Returns the current number of elements in the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } +} + +impl Receiver { + /// Tries to receive an element from the front of the queue without blocking + /// Returns Ok(value) if successful, Err(RecvError) otherwise + pub fn recv(&self) -> Result { + loop { + match self.try_recv() { + Ok(value) => return Ok(value), + Err(RecvError::Empty) => { + // Yield to the scheduler and try again + continue; + }, + Err(err) => return Err(err), + } + } + } + + /// Tries to receive an element from the front of the queue without blocking + /// Returns Ok(value) if successful, Err(RecvError) otherwise + pub fn try_recv(&self) -> Result { + // Use a separate scope for the lock to ensure it's released promptly + let result = { + let mut buffer = self.inner.buffer.lock(); + buffer.pop_front() + }; + + match result { + Some(val) => Ok(val), + None => { + // Check if there are any senders left + if self.inner.senders.load(Ordering::SeqCst) == 0 { + Err(RecvError::Disconnected) + } else { + Err(RecvError::Empty) + } + } + } + } + + + /// Tries to receive multiple elements at once, up to the specified limit + /// Returns a vector of received elements + pub fn recv_batch(&self, max_items: usize) -> Vec + where + T: Send, + { + // If max_items is 0, return an empty vector + if max_items == 0 { + return Vec::new(); + } + + let mut items = Vec::new(); + + // Lock the buffer once for the entire batch + let mut buffer = self.inner.buffer.lock(); + + // Calculate how many items to take + let count = max_items.min(buffer.len()); + + // Reserve capacity for efficiency + items.reserve(count); + + // Take items from the front of the queue + for _ in 0..count { + if let Some(item) = buffer.pop_front() { + items.push(item); + } else { + // This shouldn't happen due to the min() above, but just in case + break; + } + } + + items + } + + /// Peeks at the next element without removing it + pub fn peek(&self) -> Option + where + T: Clone, + { + let buffer = self.inner.buffer.lock(); + buffer.front().cloned() + } + + /// Returns the current number of elements in 
the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } +} + +impl Drop for Sender { + fn drop(&mut self) { + self.inner.senders.fetch_sub(1, Ordering::SeqCst); + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + self.inner.receivers.fetch_sub(1, Ordering::SeqCst); + } +} + +impl Default for Channel { + fn default() -> Self { + Self::new() + } +} \ No newline at end of file diff --git a/opentmk/src/uefi/alloc.rs b/opentmk/src/uefi/alloc.rs new file mode 100644 index 0000000000..92619fdee5 --- /dev/null +++ b/opentmk/src/uefi/alloc.rs @@ -0,0 +1,86 @@ +use core::{alloc::GlobalAlloc, cell::RefCell}; + +use linked_list_allocator::LockedHeap; +use spin::mutex::Mutex; +use uefi::{allocator::Allocator, boot::{self, AllocateType, MemoryType}}; + +pub const SIZE_1MB: usize = 1024 * 1024; + +#[global_allocator] +pub static ALLOCATOR: MemoryAllocator = MemoryAllocator { + use_locked_heap: Mutex::new(RefCell::new(false)), + locked_heap: LockedHeap::empty(), + uefi_allocator: Allocator{}, +}; + +pub struct MemoryAllocator { + use_locked_heap: Mutex>, + locked_heap: LockedHeap, + uefi_allocator: Allocator, +} + +#[expect(unsafe_code)] +unsafe impl GlobalAlloc for MemoryAllocator { + #[allow(unsafe_code)] + unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { + if *self.use_locked_heap.lock().borrow() { + unsafe { self.locked_heap.alloc(layout) } + } else { + unsafe { self.uefi_allocator.alloc(layout) } + } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) { + if *self.use_locked_heap.lock().borrow() { + unsafe { self.locked_heap.dealloc(ptr, layout) } + } else { + unsafe { self.uefi_allocator.dealloc(ptr, layout) } + } + } + + unsafe fn alloc_zeroed(&self, layout: core::alloc::Layout) -> *mut u8 { + if *self.use_locked_heap.lock().borrow() { + unsafe { self.locked_heap.alloc_zeroed(layout) } + } else { + unsafe { self.uefi_allocator.alloc_zeroed(layout) } + } + } + + unsafe fn realloc(&self, ptr: *mut u8, layout: core::alloc::Layout, new_size: usize) -> *mut u8 { + if *self.use_locked_heap.lock().borrow() { + unsafe { self.locked_heap.realloc(ptr, layout, new_size) } + } else { + unsafe { self.uefi_allocator.realloc(ptr, layout, new_size) } + } + } +} + +impl MemoryAllocator { + + #[expect(unsafe_code)] + pub unsafe fn init(&self, size: usize) -> bool { + let pages = ((SIZE_1MB * size) / 4096) + 1; + let size = pages * 4096; + let mem: Result, uefi::Error> = boot::allocate_pages(AllocateType::AnyPages, MemoryType::BOOT_SERVICES_DATA, pages); + if mem.is_err() { + return false; + } + let ptr = mem.unwrap().as_ptr(); + unsafe { + self.locked_heap.lock().init(ptr, size); + } + *self.use_locked_heap.lock().borrow_mut() = true; + return true; + } + + pub fn get_page_alligned_memory(&self, size: usize) -> *mut u8 { + let pages = ((SIZE_1MB * size) / 4096) + 1; + let size = pages * 4096; + let mem: Result, uefi::Error> = boot::allocate_pages(AllocateType::AnyPages, MemoryType::BOOT_SERVICES_DATA, pages); + if mem.is_err() { + return core::ptr::null_mut(); + } + let ptr = mem.unwrap().as_ptr(); + return ptr; + } +} diff --git a/opentmk/src/uefi/context.rs b/opentmk/src/uefi/context.rs new file mode 100644 index 0000000000..b9e5da65d6 --- /dev/null +++ b/opentmk/src/uefi/context.rs @@ -0,0 +1,61 @@ +use core::ops::Range; + +use alloc::boxed::Box; +use hvdef::Vtl; + + + +pub trait TestCtxTrait { + fn get_vp_count(&self) -> 
u32; + fn get_current_vp(&self) -> u32; + fn get_current_vtl(&self) -> Vtl; + + fn start_on_vp(&mut self, cmd: VpExecutor); + + fn queue_command_vp(&mut self, cmd: VpExecutor); + + fn switch_to_high_vtl(&mut self); + fn switch_to_low_vtl(&mut self); + + fn setup_partition_vtl(&mut self, vtl: Vtl); + fn setup_interrupt_handler(&mut self); + fn set_interupt_idx(&mut self, interrupt_idx: u8, handler: fn()); + + fn setup_vtl_protection(&mut self); + fn setup_secure_intercept(&mut self, interrupt_idx: u8); + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl); + fn write_msr(&mut self, msr: u32, value: u64); + fn read_msr(&mut self, msr: u32) -> u64; + + fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor); + fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl); + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl); + + fn get_register(&mut self, reg: u32) -> u128; +} + +pub struct VpExecutor { + vp_index: u32, + vtl: Vtl, + cmd: Option>, +} + +impl VpExecutor { + pub fn new(vp_index: u32, vtl: Vtl) -> Self { + VpExecutor { + vp_index, + vtl, + cmd: None, + } + } + + pub fn command(mut self, cmd: impl FnOnce(&mut dyn TestCtxTrait) + 'static) -> Self { + self.cmd = Some(Box::new(cmd)); + self + } + + pub fn get(mut self) -> (u32, Vtl, Option>) { + let cmd = self.cmd.take(); + (self.vp_index, self.vtl, cmd) + } +} \ No newline at end of file diff --git a/opentmk/src/uefi/hypercall.rs b/opentmk/src/uefi/hypercall.rs new file mode 100644 index 0000000000..3ffe783e70 --- /dev/null +++ b/opentmk/src/uefi/hypercall.rs @@ -0,0 +1,604 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Hypercall infrastructure. + +use arrayvec::ArrayVec; +use hvdef::hypercall::EnablePartitionVtlFlags; +use hvdef::hypercall::InitialVpContextX64; +use hvdef::HvInterruptType; +use hvdef::HvRegisterGuestVsmPartitionConfig; +use hvdef::HvRegisterValue; +use hvdef::HvRegisterVsmPartitionConfig; +use hvdef::HvX64RegisterName; +use minimal_rt::arch::hypercall::{invoke_hypercall_vtl}; +use zerocopy::FromZeros; +use core::arch; +use core::cell::RefCell; +use core::cell::UnsafeCell; +use core::mem::size_of; +use hvdef::hypercall::HvInputVtl; +use hvdef::Vtl; +use hvdef::HV_PAGE_SIZE; +use memory_range::MemoryRange; +use minimal_rt::arch::hypercall::invoke_hypercall; +use zerocopy::IntoBytes; +use zerocopy::FromBytes; + +/// Page-aligned, page-sized buffer for use with hypercalls +#[repr(C, align(4096))] +struct HvcallPage { + buffer: [u8; HV_PAGE_SIZE as usize], + +} + +impl HvcallPage { + pub const fn new() -> Self { + HvcallPage { + buffer: [0; HV_PAGE_SIZE as usize], + + } + } + + /// Address of the hypercall page. + fn address(&self) -> u64 { + let addr = self.buffer.as_ptr() as u64; + + // These should be page-aligned + assert!(addr % HV_PAGE_SIZE == 0); + + addr + } +} + +/// Provides mechanisms to invoke hypercalls within the boot shim. +/// Internally uses static buffers for the hypercall page, the input +/// page, and the output page, so this should not be used in any +/// multi-threaded capacity (which the boot shim currently is not). +pub struct HvCall { + initialized: bool, + pub vtl: Vtl, + input_page: HvcallPage, + output_page: HvcallPage, +} + +/// Returns an [`HvCall`] instance. +/// +/// Panics if another instance is already in use. 
+// #[track_caller] +// pub fn hvcall() -> core::cell::RefMut<'static, HvCall> { +// HVCALL.borrow_mut() +// } + +#[expect(unsafe_code)] +impl HvCall { + pub const fn new() -> Self { + // SAFETY: The caller must ensure that this is only called once. + unsafe { + HvCall { + initialized: false, + vtl: Vtl::Vtl0, + input_page: HvcallPage::new(), + output_page: HvcallPage::new(), + } + } + + } + fn input_page(&mut self) -> &mut HvcallPage { + &mut self.input_page + } + + fn output_page(&mut self) -> &mut HvcallPage { + &mut self.output_page + } + + /// Returns the address of the hypercall page, mapping it first if + /// necessary. + #[cfg(target_arch = "x86_64")] + pub fn hypercall_page(&mut self) -> u64 { + self.init_if_needed(); + core::ptr::addr_of!(minimal_rt::arch::hypercall::HYPERCALL_PAGE) as u64 + } + + fn init_if_needed(&mut self) { + if !self.initialized { + self.initialize(); + } + } + + pub fn initialize(&mut self) { + assert!(!self.initialized); + + // TODO: revisit os id value. For now, use 1 (which is what UEFI does) + let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1); + crate::arch::hypercall::initialize(guest_os_id.into()); + self.initialized = true; + + self.vtl = self + .get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None) + .map_or(Vtl::Vtl0, |status| { + hvdef::HvRegisterVsmVpStatus::from(status.as_u64()) + .active_vtl() + .try_into() + .unwrap() + }); + } + + /// Call before jumping to kernel. + pub fn uninitialize(&mut self) { + if self.initialized { + crate::arch::hypercall::uninitialize(); + self.initialized = false; + } + } + + /// Returns the environment's VTL. + pub fn vtl(&self) -> Vtl { + assert!(self.initialized); + self.vtl + } + + /// Makes a hypercall. + /// rep_count is Some for rep hypercalls + fn dispatch_hvcall( + &mut self, + code: hvdef::HypercallCode, + rep_count: Option, + ) -> hvdef::hypercall::HypercallOutput { + self.init_if_needed(); + + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(code.0) + .with_rep_count(rep_count.unwrap_or_default()); + + // SAFETY: Invoking hypercall per TLFS spec + unsafe { + invoke_hypercall( + control, + self.input_page().address(), + self.output_page().address(), + ) + } + } + + + pub fn set_vp_registers( + &mut self, + vp: u32, + vtl: Option, + vp_context : Option, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: vp, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let mut input_offset = HEADER_SIZE; + + let mut count = 0; + let mut write_reg = |reg_name: hvdef::HvRegisterName, reg_value: hvdef::HvRegisterValue| { + let reg = hvdef::hypercall::HvRegisterAssoc { + name: reg_name, + pad: Default::default(), + value: reg_value, + }; + + reg.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + + input_offset += size_of::(); + count += 1; + }; + // pub msr_cr_pat: u64, + + write_reg(hvdef::HvX64RegisterName::Cr0.into(), vp_context.unwrap().cr0.into()); + write_reg(hvdef::HvX64RegisterName::Cr3.into(), vp_context.unwrap().cr3.into()); + write_reg(hvdef::HvX64RegisterName::Cr4.into(), vp_context.unwrap().cr4.into()); + write_reg(hvdef::HvX64RegisterName::Rip.into(), vp_context.unwrap().rip.into()); + write_reg(hvdef::HvX64RegisterName::Rsp.into(), vp_context.unwrap().rsp.into()); + 
write_reg(hvdef::HvX64RegisterName::Rflags.into(), vp_context.unwrap().rflags.into()); + write_reg(hvdef::HvX64RegisterName::Cs.into(), vp_context.unwrap().cs.into()); + write_reg(hvdef::HvX64RegisterName::Ss.into(), vp_context.unwrap().ss.into()); + write_reg(hvdef::HvX64RegisterName::Ds.into(), vp_context.unwrap().ds.into()); + write_reg(hvdef::HvX64RegisterName::Es.into(), vp_context.unwrap().es.into()); + write_reg(hvdef::HvX64RegisterName::Fs.into(), vp_context.unwrap().fs.into()); + write_reg(hvdef::HvX64RegisterName::Gs.into(), vp_context.unwrap().gs.into()); + write_reg(hvdef::HvX64RegisterName::Gdtr.into(), vp_context.unwrap().gdtr.into()); + write_reg(hvdef::HvX64RegisterName::Idtr.into(), vp_context.unwrap().idtr.into()); + write_reg(hvdef::HvX64RegisterName::Ldtr.into(), vp_context.unwrap().ldtr.into()); + write_reg(hvdef::HvX64RegisterName::Tr.into(), vp_context.unwrap().tr.into()); + write_reg(hvdef::HvX64RegisterName::Efer.into(), vp_context.unwrap().efer.into()); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(count)); + + output.result() + } + + + /// Hypercall for setting a register to a value. + pub fn set_register( + &mut self, + name: hvdef::HvRegisterName, + value: hvdef::HvRegisterValue, + vtl: Option + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: hvdef::HV_VP_INDEX_SELF, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let reg = hvdef::hypercall::HvRegisterAssoc { + name, + pad: Default::default(), + value, + }; + + reg.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(1)); + + output.result() + } + + /// Hypercall for setting a register to a value. 
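+    /// (This is the read path: it issues `HvCallGetVpRegisters` for the current
+    /// VP and returns the value of the named register, optionally targeting a
+    /// specific VTL.)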
+ pub fn get_register( + &mut self, + name: hvdef::HvRegisterName, + vtl: Option, + ) -> Result { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: hvdef::HV_VP_INDEX_SELF, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + name.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallGetVpRegisters, Some(1)); + output.result()?; + let value = hvdef::HvRegisterValue::read_from_prefix(&self.output_page().buffer).unwrap(); + + Ok(value.0) + } + + /// Hypercall to apply vtl protections to the pages from address start to end + #[cfg_attr(target_arch = "aarch64", allow(dead_code))] + pub fn apply_vtl2_protections(&mut self, range: MemoryRange) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); + + let header = hvdef::hypercall::ModifyVtlProtectionMask { + partition_id: hvdef::HV_PARTITION_ID_SELF, + map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE, + target_vtl: HvInputVtl::CURRENT_VTL, + reserved: [0; 3], + }; + + let mut current_page = range.start_4k_gpn(); + while current_page < range.end_4k_gpn() { + let remaining_pages = range.end_4k_gpn() - current_page; + let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let mut input_offset = HEADER_SIZE; + for i in 0..count { + let page_num = current_page + i; + page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + input_offset += size_of::(); + } + + let output = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallModifyVtlProtectionMask, + Some(count as usize), + ); + + output.result()?; + + current_page += count; + } + + Ok(()) + } + + /// Hypercall to apply vtl protections to the pages from address start to end + #[cfg_attr(target_arch = "x86_64", allow(dead_code))] + pub fn apply_vtl_protections(&mut self, range: MemoryRange, vtl: Vtl) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); + + let header = hvdef::hypercall::ModifyVtlProtectionMask { + partition_id: hvdef::HV_PARTITION_ID_SELF, + map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE, + target_vtl: HvInputVtl::new() + .with_target_vtl_value(vtl.into()) + .with_use_target_vtl(true), + reserved: [0; 3], + }; + + let mut current_page = range.start_4k_gpn(); + while current_page < range.end_4k_gpn() { + let remaining_pages = range.end_4k_gpn() - current_page; + let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let mut input_offset = HEADER_SIZE; + for i in 0..count { + let page_num = current_page + i; + page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + input_offset += size_of::(); + } + + let output = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallModifyVtlProtectionMask, + Some(count as usize), + ); + + output.result()?; + + current_page += count; + } + + Ok(()) + } + + + #[cfg(target_arch = "x86_64")] + /// Hypercall to get the current VTL VP context + pub fn get_current_vtl_vp_context(&mut self) -> Result { + use hvdef::HvX64RegisterName; + use zerocopy::FromZeros; + let mut context 
:InitialVpContextX64 = FromZeros::new_zeroed(); + context.cr0 = self.get_register(HvX64RegisterName::Cr0.into(), None)?.as_u64(); + context.cr3 = self.get_register(HvX64RegisterName::Cr3.into(), None)?.as_u64(); + context.cr4 = self.get_register(HvX64RegisterName::Cr4.into(), None)?.as_u64(); + context.rip = self.get_register(HvX64RegisterName::Rip.into(), None)?.as_u64(); + context.rsp = self.get_register(HvX64RegisterName::Rsp.into(), None)?.as_u64(); + context.rflags = self.get_register(HvX64RegisterName::Rflags.into(), None)?.as_u64(); + context.cs = self.get_register(HvX64RegisterName::Cs.into(), None)?.as_segment(); + context.ss = self.get_register(HvX64RegisterName::Ss.into(), None)?.as_segment(); + context.ds = self.get_register(HvX64RegisterName::Ds.into(), None)?.as_segment(); + context.es = self.get_register(HvX64RegisterName::Es.into(), None)?.as_segment(); + context.fs = self.get_register(HvX64RegisterName::Fs.into(), None)?.as_segment(); + context.gs = self.get_register(HvX64RegisterName::Gs.into(), None)?.as_segment(); + context.gdtr = self.get_register(HvX64RegisterName::Gdtr.into(), None)?.as_table(); + context.idtr = self.get_register(HvX64RegisterName::Idtr.into(), None)?.as_table(); + context.tr = self.get_register(HvX64RegisterName::Tr.into(), None)?.as_segment(); + context.efer = self.get_register(HvX64RegisterName::Efer.into(), None)?.as_u64(); + Ok(context) + } + + pub fn high_vtl() { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(hvdef::HypercallCode::HvCallVtlCall.0) + .with_rep_count(0); + + // SAFETY: Invoking hypercall per TLFS spec + unsafe { + invoke_hypercall_vtl( + control, + ); + } + } + + pub fn low_vtl() { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(hvdef::HypercallCode::HvCallVtlReturn.0) + .with_rep_count(0); + // SAFETY: Invoking hypercall per TLFS spec + unsafe { + invoke_hypercall_vtl(control); + } + } + + pub fn enable_vtl_protection(&mut self, vp_index: u32, vtl: HvInputVtl) -> Result<(), hvdef::HvError> { + let hvreg = self.get_register(hvdef::HvX64RegisterName::VsmPartitionConfig.into(), Some(vtl))?; + let mut hvreg: HvRegisterVsmPartitionConfig = HvRegisterVsmPartitionConfig::from_bits(hvreg.as_u64()); + hvreg.set_enable_vtl_protection(true); + // hvreg.set_intercept_page(true); + // hvreg.set_default_vtl_protection_mask(0b11); + // hvreg.set_intercept_enable_vtl_protection(true); + let bits = hvreg.into_bits(); + let hvre: HvRegisterValue = hvdef::HvRegisterValue::from(bits); + self.set_register(HvX64RegisterName::VsmPartitionConfig.into(), hvre, Some(vtl)) + } + + #[cfg(target_arch = "x86_64")] + pub fn enable_vp_vtl(&mut self, vp_index: u32, target_vtl : Vtl, vp_context : Option) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::EnableVpVtlX64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + target_vtl: target_vtl.into(), + reserved: [0; 3], + vp_vtl_context: vp_context.unwrap_or( zerocopy::FromZeros::new_zeroed()), + }; + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()).expect("size of enable_vp_vtl header is not correct"); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None); + match output.result() { + Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), + err => err, + } + } + + #[cfg(target_arch = "x86_64")] + pub fn start_virtual_processor(&mut self, vp_index: u32, target_vtl : Vtl, vp_context : Option) -> Result<(), hvdef::HvError> { + let header = 
hvdef::hypercall::StartVirtualProcessorX64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: vp_index, + target_vtl: target_vtl.into(), + vp_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), + rsvd0: 0u8, + rsvd1: 0u16, + }; + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()).expect("size of start_virtual_processor header is not correct"); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None); + match output.result() { + Ok(()) => Ok(()), + err => panic!("Failed to start virtual processor: {:?}", err), + } + } + + pub fn enable_partition_vtl(&mut self, partition_id: u64, target_vtl : Vtl) -> Result<(), hvdef::HvError> { + let flags: EnablePartitionVtlFlags = + EnablePartitionVtlFlags::new() + .with_enable_mbec(false) + .with_enable_supervisor_shadow_stack(false); + + let header = hvdef::hypercall::EnablePartitionVtl { + partition_id, + target_vtl: target_vtl.into(), + flags, + reserved_z0: 0, + reserved_z1: 0, + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnablePartitionVtl, None); + match output.result() { + Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), + err => err, + } + } + + /// Hypercall to enable VP VTL + #[cfg(target_arch = "aarch64")] + pub fn enable_vp_vtl(&mut self, vp_index: u32) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::EnableVpVtlArm64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + // The VTL value here is just a u8 and not the otherwise usual + // HvInputVtl value. + target_vtl: Vtl::Vtl2.into(), + reserved: [0; 3], + vp_vtl_context: zerocopy::FromZeroes::new_zeroed(), + }; + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None); + match output.result() { + Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), + err => err, + } + } + + /// Hypercall to accept vtl2 pages from address start to end with VTL 2 + /// protections and no host visibility + #[cfg_attr(target_arch = "aarch64", allow(dead_code))] + pub fn accept_vtl2_pages( + &mut self, + range: MemoryRange, + memory_type: hvdef::hypercall::AcceptMemoryType, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); + + let mut current_page = range.start_4k_gpn(); + while current_page < range.end_4k_gpn() { + let header = hvdef::hypercall::AcceptGpaPages { + partition_id: hvdef::HV_PARTITION_ID_SELF, + page_attributes: hvdef::hypercall::AcceptPagesAttributes::new() + .with_memory_type(memory_type.0) + .with_host_visibility(hvdef::hypercall::HostVisibilityType::PRIVATE) // no host visibility + .with_vtl_set(1 << 2), // applies vtl permissions for vtl 2 + vtl_permission_set: hvdef::hypercall::VtlPermissionSet { + vtl_permission_from_1: [0; hvdef::hypercall::HV_VTL_PERMISSION_SET_SIZE], + }, + gpa_page_base: current_page, + }; + + let remaining_pages = range.end_4k_gpn() - current_page; + let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallAcceptGpaPages, + Some(count as usize), + ); + + output.result()?; + + current_page += count; + } + + Ok(()) + } + + /// Get the corresponding VP indices from a list of VP hardware IDs (APIC + /// IDs on 
x64, MPIDR on ARM64). + /// + /// This always queries VTL0, since the hardware IDs are the same across the + /// VTLs in practice, and the hypercall only succeeds for VTL2 once VTL2 has + /// been enabled (which it might not be at this point). + pub fn get_vp_index_from_hw_id( + &mut self, + hw_ids: &[HwId], + output: &mut ArrayVec, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::GetVpIndexFromApicId { + partition_id: hvdef::HV_PARTITION_ID_SELF, + target_vtl: 0, + reserved: [0; 7], + }; + + // Split the call up to avoid exceeding the hypercall input/output size limits. + const MAX_PER_CALL: usize = 512; + + for hw_ids in hw_ids.chunks(MAX_PER_CALL) { + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + hw_ids.write_to_prefix(&mut self.input_page().buffer[header.as_bytes().len()..]); + + // SAFETY: The input header and rep slice are the correct types for this hypercall. + // The hypercall output is validated right after the hypercall is issued. + let r = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallGetVpIndexFromApicId, + Some(hw_ids.len()), + ); + + let n = r.elements_processed() as usize; + + output.extend( + <[u32]>::ref_from_bytes(&mut self.output_page().buffer[..n * 4]) + .unwrap() + .iter() + .copied(), + ); + r.result()?; + assert_eq!(n, hw_ids.len()); + } + + Ok(()) + } +} + +/// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is the +/// APIC ID on x64. +#[cfg(target_arch = "x86_64")] +pub type HwId = u32; + +/// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is the +/// MPIDR on ARM64. +#[cfg(target_arch = "aarch64")] +pub type HwId = u64; \ No newline at end of file diff --git a/opentmk/src/uefi/hypvctx.rs b/opentmk/src/uefi/hypvctx.rs new file mode 100644 index 0000000000..f41725d928 --- /dev/null +++ b/opentmk/src/uefi/hypvctx.rs @@ -0,0 +1,403 @@ +use super::{ + context::{TestCtxTrait, VpExecutor}, + hypercall::HvCall, +}; +use crate::{debuglog, slog::AssertResult}; +use crate::uefi::alloc::ALLOCATOR; +use crate::{ + infolog, + slog::AssertOption, + sync::{Channel, Receiver, Sender}, +}; +use alloc::collections::btree_map::BTreeMap; +use alloc::collections::linked_list::LinkedList; +use alloc::{boxed::Box, vec::Vec}; +use core::alloc::{GlobalAlloc, Layout}; +use core::arch::asm; +use core::ops::Range; +use core::sync::atomic::{AtomicBool, Ordering}; +use hvdef::hypercall::{HvInputVtl, InitialVpContextX64}; +use hvdef::{HvAllArchRegisterName, HvRegisterName, Vtl}; +use memory_range::MemoryRange; +use minimal_rt::arch::msr::{read_msr, write_msr}; +use spin::Mutex; + +const ALIGNMENT: usize = 4096; + +type ComandTable = + BTreeMap, Vtl)>>; +static mut CMD: Mutex = Mutex::new(BTreeMap::new()); + +fn cmdt() -> &'static Mutex { + unsafe { &CMD } +} + +struct VpContext { + #[cfg(target_arch = "x86_64")] + ctx: InitialVpContextX64, + #[cfg(target_arch = "aarch64")] + ctx: InitialVpContextAarch64, +} + +fn register_command_queue(vp_index: u32) { + unsafe { + debuglog!("registering command queue for vp: {}", vp_index); + if CMD.lock().get(&vp_index).is_none() { + CMD.lock().insert(vp_index, LinkedList::new()); + debuglog!("registered command queue for vp: {}", vp_index); + } else { + debuglog!( + "command queue already registered for vp: {}", + vp_index + ); + } + } +} + +pub struct HvTestCtx { + pub hvcall: HvCall, + pub vp_runing: Vec<(u32, (bool, bool))>, + pub my_vp_idx: u32, + senders: Vec<(u64, Sender<(Box, Vtl)>)>, +} + +impl Drop for HvTestCtx { + fn drop(&mut self) { + 
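+        // Release the hypercall state owned by this context; `HvCall::uninitialize`
+        // is a no-op if the interface was never initialized.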
self.hvcall.uninitialize(); + } +} + +impl TestCtxTrait for HvTestCtx { + fn start_on_vp(&mut self, cmd: VpExecutor) { + let (vp_index, vtl, cmd) = cmd.get(); + let cmd = cmd.expect_assert("error: failed to get command as cmd is none"); + if vtl >= Vtl::Vtl2 { + panic!("error: can't run on vtl2"); + } + let is_vp_running = self.vp_runing.iter_mut().find(|x| x.0 == vp_index); + + if let Some(running_vtl) = is_vp_running { + debuglog!("both vtl0 and vtl1 are running for VP: {:?}", vp_index); + } else { + if vp_index == 0 { + let vp_context = self + .get_default_context() + .expect("error: failed to get default context"); + self.hvcall + .enable_vp_vtl(0, Vtl::Vtl1, Some(vp_context)) + .expect("error: failed to enable vtl1"); + + cmdt().lock().get_mut(&vp_index).unwrap().push_back(( + Box::new(move |ctx| { + ctx.switch_to_low_vtl(); + }), + Vtl::Vtl1, + )); + self.switch_to_high_vtl(); + self.vp_runing.push((vp_index, (true, true))); + } else { + let my_idx = self.my_vp_idx; + cmdt().lock().get_mut(&self.my_vp_idx).unwrap().push_back(( + Box::new(move |ctx| { + ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1); + ctx.start_running_vp_with_default_context(VpExecutor::new( + vp_index, + Vtl::Vtl1, + )); + cmdt().lock().get_mut(&vp_index).unwrap().push_back(( + Box::new(move |ctx| { + ctx.set_default_ctx_to_vp(vp_index, Vtl::Vtl0); + }), + Vtl::Vtl1, + )); + ctx.switch_to_low_vtl(); + }), + Vtl::Vtl1, + )); + + self.switch_to_high_vtl(); + self.vp_runing.push((vp_index, (true, true))); + } + } + cmdt() + .lock() + .get_mut(&vp_index) + .unwrap() + .push_back((cmd, vtl)); + if vp_index == self.my_vp_idx && self.hvcall.vtl != vtl { + if vtl == Vtl::Vtl0 { + self.switch_to_low_vtl(); + } else { + self.switch_to_high_vtl(); + } + } + } + + fn queue_command_vp(&mut self, cmd: VpExecutor) { + let (vp_index, vtl, cmd) = cmd.get(); + let cmd = + cmd.expect_assert("error: failed to get command as cmd is none with queue command vp"); + cmdt() + .lock() + .get_mut(&vp_index) + .unwrap() + .push_back((cmd, vtl)); + } + + fn switch_to_high_vtl(&mut self) { + HvCall::high_vtl(); + } + + fn switch_to_low_vtl(&mut self) { + HvCall::low_vtl(); + } + + fn setup_partition_vtl(&mut self, vtl: Vtl) { + self.hvcall + .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl) + .expect_assert("Failed to enable VTL1 for the partition"); + infolog!("enabled vtl protections for the partition."); + } + fn setup_interrupt_handler(&mut self) { + crate::arch::interrupt::init(); + } + + fn setup_vtl_protection(&mut self) { + self.hvcall + .enable_vtl_protection(0, HvInputVtl::CURRENT_VTL) + .expect_assert("Failed to enable VTL protection, vtl1"); + + infolog!("enabled vtl protections for the partition."); + } + + fn setup_secure_intercept(&mut self, interrupt_idx: u8) { + let layout = Layout::from_size_align(4096, ALIGNMENT) + .expect_assert("error: failed to create layout for SIMP page"); + + let ptr = unsafe { ALLOCATOR.alloc(layout) }; + let gpn = (ptr as u64) >> 12; + let reg = (gpn << 12) | 0x1; + + unsafe { write_msr(hvdef::HV_X64_MSR_SIMP, reg.into()) }; + infolog!("Successfuly set the SIMP register."); + + let reg = unsafe { read_msr(hvdef::HV_X64_MSR_SINT0) }; + let mut reg: hvdef::HvSynicSint = reg.into(); + reg.set_vector(interrupt_idx); + reg.set_masked(false); + reg.set_auto_eoi(true); + + self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into()); + infolog!("Successfuly set the SINT0 register."); + } + + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) { + self.hvcall + 
.apply_vtl_protections(MemoryRange::new(range), vtl) + .expect_assert("Failed to apply VTL protections"); + } + + fn write_msr(&mut self, msr: u32, value: u64) { + unsafe { write_msr(msr, value) }; + } + + fn read_msr(&mut self, msr: u32) -> u64 { + unsafe { read_msr(msr) } + } + + fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) { + let (vp_index, vtl, cmd) = cmd.get(); + let vp_ctx = self + .get_default_context() + .expect_assert("error: failed to get default context"); + self.hvcall + .start_virtual_processor(vp_index, vtl, Some(vp_ctx)) + .expect_assert("error: failed to start vp"); + } + + fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) { + let i: u8 = match vtl { + Vtl::Vtl0 => 0, + Vtl::Vtl1 => 1, + Vtl::Vtl2 => 2, + _ => panic!("error: invalid vtl"), + }; + let vp_context = self + .get_default_context() + .expect_assert("error: failed to get default context"); + self.hvcall + .set_vp_registers( + vp_index, + Some( + HvInputVtl::new() + .with_target_vtl_value(i) + .with_use_target_vtl(true), + ), + Some(vp_context), + ) + .expect_assert("error: failed to set vp registers"); + } + + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) { + let vp_ctx = self + .get_default_context() + .expect_assert("error: failed to get default context"); + self.hvcall + .enable_vp_vtl(vp_index, vtl, Some(vp_ctx)) + .expect_assert("error: failed to enable vp vtl"); + } + + #[cfg(target_arch = "x86_64")] + fn set_interupt_idx(&mut self, interrupt_idx: u8, handler: fn()) { + crate::arch::interrupt::set_handler(interrupt_idx, handler); + } + + fn get_vp_count(&self) -> u32 { + let mut result: u32 = 0; + + unsafe { + // Call CPUID with EAX=1, but work around the rbx constraint + asm!( + "push rbx", // Save rbx + "cpuid", // Execute CPUID + "mov {result}, rbx", // Store ebx to our result variable + "pop rbx", // Restore rbx + in("eax") 1u32, // Input: CPUID leaf 1 + out("ecx") _, // Output registers (not used) + out("edx") _, // Output registers (not used) + result = out(reg) result, // Output: result from ebx + options(nomem, nostack) + ); + } + + // Extract logical processor count from bits [23:16] + (result >> 16) & 0xFF + } + + #[cfg(target_arch = "x86_64")] + fn get_register(&mut self, reg: u32) -> u128 { + use hvdef::HvX64RegisterName; + + let reg = HvX64RegisterName(reg); + self.hvcall + .get_register(reg.into(), None) + .expect_assert("error: failed to get register") + .as_u128() + } + + #[cfg(target_arch = "aarch64")] + fn get_register(&mut self, reg: u32) -> u128 { + use hvdef::HvAarch64RegisterName; + + let reg = HvAarch64RegisterName(reg); + self.hvcall + .get_register(reg.into(), None) + .expect_assert("error: failed to get register") + .as_u128() + } + + fn get_current_vp(&self) -> u32 { + self.my_vp_idx + } + + fn get_current_vtl(&self) -> Vtl { + self.hvcall.vtl + } +} + +impl HvTestCtx { + pub const fn new() -> Self { + HvTestCtx { + hvcall: HvCall::new(), + vp_runing: Vec::new(), + my_vp_idx: 0, + senders: Vec::new(), + } + } + + pub fn init(&mut self) { + self.hvcall.initialize(); + let vp_count = self.get_vp_count(); + for i in 0..vp_count { + register_command_queue(i); + } + } + + fn exec_handler() { + let mut ctx = HvTestCtx::new(); + ctx.init(); + let reg = ctx + .hvcall + .get_register(hvdef::HvAllArchRegisterName::VpIndex.into(), None) + .expect("error: failed to get vp index"); + let reg = reg.as_u64(); + ctx.my_vp_idx = reg as u32; + + loop { + let mut vtl: Option = None; + let mut cmd: Option> = None; + + { + let mut d = 
unsafe { CMD.lock() }; + let mut d = d.get_mut(&ctx.my_vp_idx); + if d.is_some() { + let mut d = d.unwrap(); + if !d.is_empty() { + let (c, v) = d.front().unwrap(); + if *v == ctx.hvcall.vtl { + let (c, v) = d.pop_front().unwrap(); + cmd = Some(c); + } else { + vtl = Some(*v); + } + } + } + } + + if let Some(vtl) = vtl { + if (vtl == Vtl::Vtl0) { + ctx.switch_to_low_vtl(); + } else { + ctx.switch_to_high_vtl(); + } + } + + if let Some(cmd) = cmd { + cmd(&mut ctx); + } + } + } + + #[cfg(target_arch = "x86_64")] + fn get_default_context(&mut self) -> Result { + return self.run_fn_with_current_context(HvTestCtx::exec_handler); + } + + #[cfg(target_arch = "x86_64")] + fn run_fn_with_current_context(&mut self, func: fn()) -> Result { + use super::alloc::SIZE_1MB; + + let mut vp_context: InitialVpContextX64 = self + .hvcall + .get_current_vtl_vp_context() + .expect("Failed to get VTL1 context"); + let stack_layout = Layout::from_size_align(SIZE_1MB, 16) + .expect("Failed to create layout for stack allocation"); + let x = unsafe { ALLOCATOR.alloc(stack_layout) }; + if x.is_null() { + return Err(false); + } + let sz = stack_layout.size(); + let stack_top = x as u64 + sz as u64; + let fn_ptr = func as fn(); + let fn_address = fn_ptr as u64; + vp_context.rip = fn_address; + vp_context.rsp = stack_top; + // print stack range + let stack_range = Range { + start: x as u64, + end: x as u64 + sz as u64, + }; + Ok(vp_context) + } +} diff --git a/opentmk/src/uefi/init.rs b/opentmk/src/uefi/init.rs new file mode 100644 index 0000000000..1f0535d479 --- /dev/null +++ b/opentmk/src/uefi/init.rs @@ -0,0 +1,57 @@ +use core::alloc::{GlobalAlloc, Layout}; + +use uefi::{boot::{exit_boot_services, MemoryType}, guid, println, CStr16, Status}; + +use crate::infolog; + +use super::{alloc::ALLOCATOR}; + + +fn enable_uefi_vtl_protection() { + let mut buf = vec![0u8; 1024]; + let mut str_buff = vec![0u16; 1024]; + let os_loader_indications_key = + CStr16::from_str_with_buf(&"OsLoaderIndications", str_buff.as_mut_slice()).unwrap(); + + let os_loader_indications_result = uefi::runtime::get_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91")), + buf.as_mut(), + ) + .expect("Failed to get OsLoaderIndications"); + + let mut os_loader_indications = u32::from_le_bytes( + os_loader_indications_result.0[0..4] + .try_into() + .expect("error in output"), + ); + os_loader_indications |= 0x1u32; + + let os_loader_indications = os_loader_indications.to_le_bytes(); + + let _ = uefi::runtime::set_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91")), + os_loader_indications_result.1, + &os_loader_indications, + ) + .expect("Failed to set OsLoaderIndications"); + + let os_loader_indications_result = uefi::runtime::get_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91")), + buf.as_mut(), + ) + .expect("Failed to get OsLoaderIndications"); + + let _ = unsafe { exit_boot_services(MemoryType::BOOT_SERVICES_DATA) }; +} + +pub fn init() -> Result<(), Status> { + let r: bool = unsafe { ALLOCATOR.init(2048) }; + if r == false { + return Err(Status::ABORTED); + } + enable_uefi_vtl_protection(); + Ok(()) +} \ No newline at end of file diff --git a/opentmk/src/uefi/mod.rs b/opentmk/src/uefi/mod.rs new file mode 100644 index 0000000000..c09873dbf2 --- /dev/null +++ b/opentmk/src/uefi/mod.rs @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. 
+// Licensed under the MIT License. + +mod alloc; +mod context; +pub mod hypercall; +mod hypvctx; +pub mod init; +mod rt; +mod tests; + +use crate::slog::{AssertOption, AssertResult}; +use crate::sync::{Channel, Receiver, Sender}; +use crate::uefi::alloc::ALLOCATOR; +use crate::{infolog, tmk_assert}; +use ::alloc::boxed::Box; +use ::alloc::vec::Vec; +use alloc::SIZE_1MB; +use context::{TestCtxTrait, VpExecutor}; +use core::alloc::{GlobalAlloc, Layout}; +use core::cell::RefCell; +use core::ops::Range; +use core::sync::atomic::{AtomicI32, Ordering}; +use hvdef::hypercall::HvInputVtl; +use hvdef::Vtl; +use hypvctx::HvTestCtx; +use init::init; +use uefi::entry; +use uefi::Status; + +#[entry] +fn uefi_main() -> Status { + init().expect_assert("Failed to initialize environment"); + tests::run_test(); + loop {} +} diff --git a/opentmk/src/uefi/rt.rs b/opentmk/src/uefi/rt.rs new file mode 100644 index 0000000000..4868ad542d --- /dev/null +++ b/opentmk/src/uefi/rt.rs @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Runtime support for the UEFI application environment. + +#![cfg(target_os = "uefi")] +// UNSAFETY: Raw assembly needed for panic handling to abort. +#![expect(unsafe_code)] + +use crate::arch::serial::{Serial, InstrIoAccess}; +use core::fmt::Write; +use crate::slog; +use crate::sync::Mutex; + +#[panic_handler] +fn panic_handler(panic: &core::panic::PanicInfo<'_>) -> ! { + + let io = InstrIoAccess {}; + let mut ser = Mutex::new(Serial::new(io)); + crate::errorlog!("Panic at runtime: {}", panic); + crate::errorlog!("Could not shut down... falling back to invoking an undefined instruction"); + loop{} +} diff --git a/opentmk/src/uefi/tests/hv_misc.rs b/opentmk/src/uefi/tests/hv_misc.rs new file mode 100644 index 0000000000..3bda306d70 --- /dev/null +++ b/opentmk/src/uefi/tests/hv_misc.rs @@ -0,0 +1,136 @@ +// WIP : This test is not yet complete and is not expected to pass. +// +// This test is to verify that the VTL protections are working as expected. +// The stack values in VTL0 are changing after interrupt handling in VTL1. 
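+//
+// Outline of the flow implemented below: VTL1 allocates a heap buffer, writes a
+// marker byte into it, applies VTL protections to that range, and returns to
+// VTL0; VTL0 then reads the same offset and asserts that the marker value is no
+// longer visible.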
+use crate::slog::{AssertOption, AssertResult}; +use crate::sync::{Channel, Receiver, Sender}; +use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB}; +use crate::uefi::{context, hypvctx}; +use crate::{infolog, tmk_assert}; +use ::alloc::boxed::Box; +use alloc::sync::Arc; +use ::alloc::vec::Vec; +use context::{TestCtxTrait, VpExecutor}; +use hypvctx::HvTestCtx; +use core::alloc::{GlobalAlloc, Layout}; +use core::arch::asm; +use core::cell::RefCell; +use core::ops::Range; +use core::sync::atomic::{AtomicI32, Ordering}; +use hvdef::hypercall::HvInputVtl; +use hvdef::{HvAllArchRegisterName, HvRegisterVsmVpStatus, HvX64RegisterName, Vtl}; +use uefi::entry; +use uefi::Status; + +static mut HEAPX: RefCell<*mut u8> = RefCell::new(0 as *mut u8); +static mut CON: AtomicI32 = AtomicI32::new(0); + +pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { + infolog!("ctx ptr: {:p}", &ctx as *const _); + + let mut vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count == 8, "vp count should be 8"); + + ctx.setup_interrupt_handler(); + + infolog!("set intercept handler successfully!"); + + ctx.setup_partition_vtl(Vtl::Vtl1); + + ctx.start_on_vp( + VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut dyn TestCtxTrait| { + infolog!("successfully started running VTL1 on vp0."); + ctx.setup_secure_intercept(0x30); + ctx.set_interupt_idx(0x30, || { + infolog!("interrupt fired!"); + + let mut hv_test_ctx = HvTestCtx::new(); + hv_test_ctx.init(); + + let c = hv_test_ctx.get_register(HvAllArchRegisterName::VsmVpStatus.0); + + let cp = HvRegisterVsmVpStatus::from_bits(c as u64); + + infolog!("VSM VP Status: {:?}", cp); + + infolog!("interrupt handled!"); + }); + + let layout = + Layout::from_size_align(SIZE_1MB, 4096).expect("msg: failed to create layout"); + let ptr = unsafe { ALLOCATOR.alloc(layout) }; + infolog!("allocated some memory in the heap from vtl1"); + unsafe { + let mut z = HEAPX.borrow_mut(); + *z = ptr; + *ptr.add(10) = 0xAA; + } + + let size = layout.size(); + ctx.setup_vtl_protection(); + + infolog!("enabled vtl protections for the partition."); + + let range = Range { + start: ptr as u64, + end: ptr as u64 + size as u64, + }; + + ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); + + infolog!("moving to vtl0 to attempt to read the heap memory"); + + ctx.switch_to_low_vtl(); + }), + ); + + ctx.queue_command_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx| { + infolog!("successfully started running VTL1 on vp0."); + ctx.switch_to_low_vtl(); + })); + infolog!("ctx ptr: {:p}", &ctx as *const _); + + let mut l = 0u64; + unsafe { asm!("mov {}, rsp", out(reg) l) }; + infolog!("rsp: 0x{:x}", l); + unsafe { + infolog!("Attempting to read heap memory from vtl0"); + let heapx = *HEAPX.borrow(); + let val = *(heapx.add(10)); + infolog!( + "reading mutated heap memory from vtl0(it should not be 0xAA): 0x{:x}", + val + ); + tmk_assert!( + val != 0xAA, + "heap memory should not be accessible from vtl0" + ); + } + + unsafe { asm!("mov {}, rsp", out(reg) l) }; + infolog!("rsp: 0x{:x}", l); + + // let (mut tx, mut rx) = Channel::new(1); + // { + // let mut tx = tx.clone(); + // ctx.start_on_vp(VpExecutor::new(2, Vtl::Vtl0).command( + // move |ctx: &mut dyn TestCtxTrait| { + // infolog!("Hello form vtl0 on vp2!"); + // tx.send(()); + // }, + // )); + // } + infolog!("ctx ptr: {:p}", &ctx as *const _); + let c = ctx.get_vp_count(); + + tmk_assert!(c == 8, "vp count should be 8"); + + // rx.recv(); + + infolog!("we are in vtl0 now!"); + infolog!("we reached the end of the test"); + loop { + + } + +} \ No newline at end of file 
diff --git a/opentmk/src/uefi/tests/hv_processor.rs b/opentmk/src/uefi/tests/hv_processor.rs new file mode 100644 index 0000000000..b71a346065 --- /dev/null +++ b/opentmk/src/uefi/tests/hv_processor.rs @@ -0,0 +1,75 @@ +use alloc::vec::Vec; +use hvdef::Vtl; + +use crate::{ + criticallog, infolog, sync::{self, Mutex}, tmk_assert, uefi::context::{TestCtxTrait, VpExecutor} +}; + +static VP_RUNNING: Mutex> = Mutex::new(Vec::new()); + +pub fn exec(ctx: &mut dyn TestCtxTrait) { + ctx.setup_interrupt_handler(); + ctx.setup_partition_vtl(Vtl::Vtl1); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count == 8, "vp count should be 8"); + + // Testing BSP VTL Bringup + { + let (mut tx, mut rx) = crate::sync::Channel::new().split(); + ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command( + move |ctx: &mut dyn TestCtxTrait| { + let vp = ctx.get_current_vp(); + infolog!("vp: {}", vp); + tmk_assert!(vp == 0, "vp should be equal to 0"); + + let vtl = ctx.get_current_vtl(); + infolog!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); + tx.send(()); + ctx.switch_to_low_vtl(); + }, + )); + rx.recv(); + } + + for i in 1..vp_count { + // Testing VTL1 + { + let (mut tx, mut rx) = crate::sync::Channel::new().split(); + ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command( + move |ctx: &mut dyn TestCtxTrait| { + let vp = ctx.get_current_vp(); + infolog!("vp: {}", vp); + tmk_assert!(vp == i, format!("vp should be equal to {}", i)); + + let vtl = ctx.get_current_vtl(); + infolog!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl0 for VP {}", i)); + tx.send(()); + }, + )); + rx.clone().recv(); + } + + // Testing VTL0 + { + let (mut tx, mut rx) = crate::sync::Channel::new().split(); + ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command( + move |ctx: &mut dyn TestCtxTrait| { + let vp = ctx.get_current_vp(); + infolog!("vp: {}", vp); + tmk_assert!(vp == i, format!("vp should be equal to {}", i)); + + let vtl = ctx.get_current_vtl(); + infolog!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl0, format!("vtl should be Vtl0 for VP {}", i)); + tx.send(()); + }, + )); + rx.clone().recv(); + } + } + + criticallog!("All VPs have been tested"); +} diff --git a/opentmk/src/uefi/tests/mod.rs b/opentmk/src/uefi/tests/mod.rs new file mode 100644 index 0000000000..51686205e2 --- /dev/null +++ b/opentmk/src/uefi/tests/mod.rs @@ -0,0 +1,14 @@ +use alloc::sync::Arc; + +use super::hypvctx::HvTestCtx; + +pub mod hv_processor; +pub mod hv_misc; + +pub fn run_test() { + let mut ctx = HvTestCtx::new(); + ctx.init(); + + + hv_processor::exec(&mut ctx); +} \ No newline at end of file diff --git a/xtask/src/tasks/guest_test/uefi/gpt_efi_disk.rs b/xtask/src/tasks/guest_test/uefi/gpt_efi_disk.rs index fb4641a93b..862db7e55c 100644 --- a/xtask/src/tasks/guest_test/uefi/gpt_efi_disk.rs +++ b/xtask/src/tasks/guest_test/uefi/gpt_efi_disk.rs @@ -22,7 +22,7 @@ pub fn create_gpt_efi_disk(out_img: &Path, with_files: &[(&Path, &Path)]) -> Res )); } - let disk_size = 1024 * 1024 * 32; // 32MB disk should be enough for our tests + let disk_size = 1024 * 1024 * 512; // 32MB disk should be enough for our tests let num_sectors = disk_size / SECTOR_SIZE; let mut disk = vec![0; num_sectors * SECTOR_SIZE]; From 351199b347fad68e26c7bdf0e786e663be88752f Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Mon, 21 Apr 2025 18:43:11 +0000 Subject: [PATCH 02/10] feat: added docs --- opentmk/src/uefi/hypercall.rs | 80 ++++++++++++++++++++++++++++++----- opentmk/src/uefi/hypvctx.rs | 76 
++++++++++++++++++++++++++++++--- opentmk/src/uefi/mod.rs | 2 +- opentmk/src/uefi/rt.rs | 14 +++--- 4 files changed, 146 insertions(+), 26 deletions(-) diff --git a/opentmk/src/uefi/hypercall.rs b/opentmk/src/uefi/hypercall.rs index 3ffe783e70..8e3aa61312 100644 --- a/opentmk/src/uefi/hypercall.rs +++ b/opentmk/src/uefi/hypercall.rs @@ -11,7 +11,6 @@ use hvdef::HvRegisterGuestVsmPartitionConfig; use hvdef::HvRegisterValue; use hvdef::HvRegisterVsmPartitionConfig; use hvdef::HvX64RegisterName; -use minimal_rt::arch::hypercall::{invoke_hypercall_vtl}; use zerocopy::FromZeros; use core::arch; use core::cell::RefCell; @@ -21,7 +20,7 @@ use hvdef::hypercall::HvInputVtl; use hvdef::Vtl; use hvdef::HV_PAGE_SIZE; use memory_range::MemoryRange; -use minimal_rt::arch::hypercall::invoke_hypercall; +use minimal_rt::arch::hypercall::{invoke_hypercall, HYPERCALL_PAGE}; use zerocopy::IntoBytes; use zerocopy::FromBytes; @@ -32,6 +31,19 @@ struct HvcallPage { } +pub fn invoke_hypercall_vtl(control: hvdef::hypercall::Control) { + // SAFETY: the caller guarantees the safety of this operation. + unsafe { + core::arch::asm! { + "call {hypercall_page}", + hypercall_page = sym HYPERCALL_PAGE, + inout("rcx") u64::from(control) => _, + in("rdx") 0, + in("rax") 0, + } + } +} + impl HvcallPage { pub const fn new() -> Self { HvcallPage { @@ -52,6 +64,59 @@ impl HvcallPage { } /// Provides mechanisms to invoke hypercalls within the boot shim. +/// +/// This module defines the `HvCall` struct and associated methods to interact with +/// hypervisor functionalities through hypercalls. It includes utilities for managing +/// hypercall pages, setting and getting virtual processor (VP) registers, enabling +/// VTL (Virtual Trust Levels), and applying memory protections. +/// +/// # Overview +/// +/// - **Hypercall Pages**: Manages page-aligned buffers for hypercall input and output. +/// - **VP Registers**: Provides methods to set and get VP registers. +/// - **VTL Management**: Includes methods to enable VTLs, apply VTL protections, and +/// manage VTL-specific operations. +/// - **Memory Protections**: Supports applying VTL protections and accepting VTL2 pages. +/// +/// # Safety +/// +/// Many methods in this module involve unsafe operations, such as invoking hypercalls +/// or interacting with low-level memory structures. The caller must ensure the safety +/// of these operations by adhering to the requirements of the hypervisor and the +/// underlying architecture. +/// +/// # Usage +/// +/// This module is designed for use in single-threaded environments, such as the boot +/// shim. It uses static buffers for hypercall pages, so it is not thread-safe. +/// +/// # Features +/// +/// - **Architecture-Specific Implementations**: Some methods are only available for +/// specific architectures (e.g., `x86_64` or `aarch64`). +/// - **Error Handling**: Methods return `Result` types to handle hypervisor errors. +/// +/// # Examples +/// +/// ```rust +/// let mut hv_call = HvCall::new(); +/// hv_call.initialize(); +/// let vtl = hv_call.vtl(); +/// println!("Current VTL: {:?}", vtl); +/// hv_call.uninitialize(); +/// ``` +/// +/// # Modules and Types +/// +/// - `HvCall`: Main struct for managing hypercalls. +/// - `HvcallPage`: Struct for page-aligned buffers. +/// - `HwId`: Type alias for hardware IDs (APIC ID on `x86_64`, MPIDR on `aarch64`). +/// +/// # Notes +/// +/// - This module assumes the presence of a hypervisor that supports the required +/// hypercalls. 
+/// - The boot shim must ensure that hypercalls are invoked in a valid context. /// Internally uses static buffers for the hypercall page, the input /// page, and the output page, so this should not be used in any /// multi-threaded capacity (which the boot shim currently is not). @@ -62,13 +127,6 @@ pub struct HvCall { output_page: HvcallPage, } -/// Returns an [`HvCall`] instance. -/// -/// Panics if another instance is already in use. -// #[track_caller] -// pub fn hvcall() -> core::cell::RefMut<'static, HvCall> { -// HVCALL.borrow_mut() -// } #[expect(unsafe_code)] impl HvCall { @@ -384,7 +442,7 @@ impl HvCall { Ok(context) } - pub fn high_vtl() { + pub fn vtl_call() { let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() .with_code(hvdef::HypercallCode::HvCallVtlCall.0) .with_rep_count(0); @@ -397,7 +455,7 @@ impl HvCall { } } - pub fn low_vtl() { + pub fn vtl_return() { let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() .with_code(hvdef::HypercallCode::HvCallVtlReturn.0) .with_rep_count(0); diff --git a/opentmk/src/uefi/hypvctx.rs b/opentmk/src/uefi/hypvctx.rs index f41725d928..79e74cc057 100644 --- a/opentmk/src/uefi/hypvctx.rs +++ b/opentmk/src/uefi/hypvctx.rs @@ -2,8 +2,8 @@ use super::{ context::{TestCtxTrait, VpExecutor}, hypercall::HvCall, }; -use crate::{debuglog, slog::AssertResult}; use crate::uefi::alloc::ALLOCATOR; +use crate::{debuglog, slog::AssertResult}; use crate::{ infolog, slog::AssertOption, @@ -46,10 +46,7 @@ fn register_command_queue(vp_index: u32) { CMD.lock().insert(vp_index, LinkedList::new()); debuglog!("registered command queue for vp: {}", vp_index); } else { - debuglog!( - "command queue already registered for vp: {}", - vp_index - ); + debuglog!("command queue already registered for vp: {}", vp_index); } } } @@ -67,6 +64,71 @@ impl Drop for HvTestCtx { } } +/// Implementation of the `TestCtxTrait` for the `HvTestCtx` structure, providing +/// various methods to manage and interact with virtual processors (VPs) and +/// Virtual Trust Levels (VTLs) in a hypervisor context. +/// +/// # Methods +/// +/// - `start_on_vp(&mut self, cmd: VpExecutor)`: +/// Starts a virtual processor (VP) on a specified VTL. Handles enabling VTLs, +/// switching between high and low VTLs, and managing VP execution contexts. +/// +/// - `queue_command_vp(&mut self, cmd: VpExecutor)`: +/// Queues a command for a specific VP and VTL. +/// +/// - `switch_to_high_vtl(&mut self)`: +/// Switches the current execution context to a high VTL. +/// +/// - `switch_to_low_vtl(&mut self)`: +/// Switches the current execution context to a low VTL. +/// +/// - `setup_partition_vtl(&mut self, vtl: Vtl)`: +/// Configures the partition to enable a specified VTL. +/// +/// - `setup_interrupt_handler(&mut self)`: +/// Sets up the interrupt handler for the architecture. +/// +/// - `setup_vtl_protection(&mut self)`: +/// Enables VTL protection for the current partition. +/// +/// - `setup_secure_intercept(&mut self, interrupt_idx: u8)`: +/// Configures secure intercept for a specified interrupt index, including +/// setting up the SIMP and SINT0 registers. +/// +/// - `apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl)`: +/// Applies VTL protections to a specified memory range. +/// +/// - `write_msr(&mut self, msr: u32, value: u64)`: +/// Writes a value to a specified Model-Specific Register (MSR). +/// +/// - `read_msr(&mut self, msr: u32) -> u64`: +/// Reads the value of a specified Model-Specific Register (MSR). 
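Taken together, these methods are meant to be driven from a single test body. The sketch below is modelled on the `hv_processor.rs` test added in this series; the `sample_test` wrapper and the choice of VP 0 / VTL1 are illustrative only, not new API:

```rust
use hvdef::Vtl;
use crate::uefi::context::{TestCtxTrait, VpExecutor};

// Sketch only: mirrors how hv_processor.rs drives the context.
fn sample_test(ctx: &mut dyn TestCtxTrait) {
    ctx.setup_interrupt_handler();
    ctx.setup_partition_vtl(Vtl::Vtl1);

    // Run a closure on VP 0 in VTL1 and wait for it to report back.
    let (tx, rx) = crate::sync::Channel::new().split();
    ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(
        move |ctx: &mut dyn TestCtxTrait| {
            _ = tx.send(());
            ctx.switch_to_low_vtl();
        },
    ));
    _ = rx.recv();
}
```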
+/// +/// - `start_running_vp_with_default_context(&mut self, cmd: VpExecutor)`: +/// Starts a VP with the default execution context. +/// +/// - `set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl)`: +/// Sets the default execution context for a specified VP and VTL. +/// +/// - `enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl)`: +/// Enables a VTL for a specified VP using the default execution context. +/// +/// - `set_interupt_idx(&mut self, interrupt_idx: u8, handler: fn())`: +/// Sets an interrupt handler for a specified interrupt index. (x86_64 only) +/// +/// - `get_vp_count(&self) -> u32`: +/// Retrieves the number of virtual processors available on the system. +/// +/// - `get_register(&mut self, reg: u32) -> u128`: +/// Retrieves the value of a specified register. Supports both x86_64 and +/// aarch64 architectures. +/// +/// - `get_current_vp(&self) -> u32`: +/// Returns the index of the current virtual processor. +/// +/// - `get_current_vtl(&self) -> Vtl`: +/// Returns the current Virtual Trust Level (VTL). impl TestCtxTrait for HvTestCtx { fn start_on_vp(&mut self, cmd: VpExecutor) { let (vp_index, vtl, cmd) = cmd.get(); @@ -145,11 +207,11 @@ impl TestCtxTrait for HvTestCtx { } fn switch_to_high_vtl(&mut self) { - HvCall::high_vtl(); + HvCall::vtl_call(); } fn switch_to_low_vtl(&mut self) { - HvCall::low_vtl(); + HvCall::vtl_return(); } fn setup_partition_vtl(&mut self, vtl: Vtl) { diff --git a/opentmk/src/uefi/mod.rs b/opentmk/src/uefi/mod.rs index c09873dbf2..08cadaa5b5 100644 --- a/opentmk/src/uefi/mod.rs +++ b/opentmk/src/uefi/mod.rs @@ -32,5 +32,5 @@ use uefi::Status; fn uefi_main() -> Status { init().expect_assert("Failed to initialize environment"); tests::run_test(); - loop {} + Status::SUCCESS } diff --git a/opentmk/src/uefi/rt.rs b/opentmk/src/uefi/rt.rs index 4868ad542d..1623e2f960 100644 --- a/opentmk/src/uefi/rt.rs +++ b/opentmk/src/uefi/rt.rs @@ -7,17 +7,17 @@ // UNSAFETY: Raw assembly needed for panic handling to abort. #![expect(unsafe_code)] -use crate::arch::serial::{Serial, InstrIoAccess}; -use core::fmt::Write; +use crate::arch::serial::{InstrIoAccess, Serial}; use crate::slog; use crate::sync::Mutex; +use core::arch::asm; +use core::fmt::Write; #[panic_handler] fn panic_handler(panic: &core::panic::PanicInfo<'_>) -> ! { - - let io = InstrIoAccess {}; - let mut ser = Mutex::new(Serial::new(io)); crate::errorlog!("Panic at runtime: {}", panic); - crate::errorlog!("Could not shut down... falling back to invoking an undefined instruction"); - loop{} + unsafe { + asm!("int 8H"); + } + loop {} } From 3e1673e74f6ad7bc1afab39cb5a23c88c4a31d1c Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Mon, 21 Apr 2025 18:50:56 +0000 Subject: [PATCH 03/10] refactor(opentmk): interrupt handler doesn't need to save state for all the registers --- .../arch/x86_64/interrupt_handler_register.rs | 81 +------------------ 1 file changed, 1 insertion(+), 80 deletions(-) diff --git a/opentmk/src/arch/x86_64/interrupt_handler_register.rs b/opentmk/src/arch/x86_64/interrupt_handler_register.rs index c015edb666..d2433c780b 100644 --- a/opentmk/src/arch/x86_64/interrupt_handler_register.rs +++ b/opentmk/src/arch/x86_64/interrupt_handler_register.rs @@ -9,86 +9,7 @@ static COMMON_HANDLER_MUTEX: Mutex<()> = Mutex::new(()); macro_rules! 
create_fn { ($name:ident, $i: expr) => { extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) { - unsafe { - asm!(r#" - push rax - push rbx - push rcx - push rdx - push rsi - push rdi - push rbp - push rsp - push r8 - push r9 - push r10 - push r11 - push r12 - push r13 - push r14 - push r15 - - sub rsp, 256 - movups [rsp + 16 * 0], xmm0 - movups [rsp + 16 * 1], xmm1 - movups [rsp + 16 * 2], xmm2 - movups [rsp + 16 * 3], xmm3 - movups [rsp + 16 * 4], xmm4 - movups [rsp + 16 * 5], xmm5 - movups [rsp + 16 * 6], xmm6 - movups [rsp + 16 * 7], xmm7 - movups [rsp + 16 * 8], xmm8 - movups [rsp + 16 * 9], xmm9 - movups [rsp + 16 * 10], xmm10 - movups [rsp + 16 * 11], xmm11 - movups [rsp + 16 * 12], xmm12 - movups [rsp + 16 * 13], xmm13 - movups [rsp + 16 * 14], xmm14 - movups [rsp + 16 * 15], xmm15 -"#); - -unsafe { (COMMON_HANDLER)(stack_frame, $i) }; - -asm!(r#" - - - movups xmm0, [rsp + 16 * 0] - movups xmm1, [rsp + 16 * 1] - movups xmm2, [rsp + 16 * 2] - movups xmm3, [rsp + 16 * 3] - movups xmm4, [rsp + 16 * 4] - movups xmm5, [rsp + 16 * 5] - movups xmm6, [rsp + 16 * 6] - movups xmm7, [rsp + 16 * 7] - movups xmm8, [rsp + 16 * 8] - movups xmm9, [rsp + 16 * 9] - movups xmm10, [rsp + 16 * 10] - movups xmm11, [rsp + 16 * 11] - movups xmm12, [rsp + 16 * 12] - movups xmm13, [rsp + 16 * 13] - movups xmm14, [rsp + 16 * 14] - movups xmm15, [rsp + 16 * 15] - add rsp, 16 * 16 - - pop r15 - pop r14 - pop r13 - pop r12 - pop r11 - pop r10 - pop r9 - pop r8 - pop rsp - pop rbp - pop rdi - pop rsi - pop rdx - pop rcx - pop rbx - pop rax - - "#); - } + unsafe { (COMMON_HANDLER)(stack_frame, $i) }; } }; } From 056bf7dd8a78cda67986a6df4d805b11d8d78f6f Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Mon, 21 Apr 2025 19:10:01 +0000 Subject: [PATCH 04/10] reafctor(opentmk): remove dead code --- opentmk/src/arch/aarch64/mod.rs | 3 +- opentmk/src/arch/aarch64/serial.rs | 240 ------------------------- opentmk/src/slog.rs | 2 +- opentmk/src/sync.rs | 160 +---------------- opentmk/src/uefi/tests/hv_processor.rs | 2 - opentmk/src/uefi/tests/mod.rs | 2 - 6 files changed, 6 insertions(+), 403 deletions(-) delete mode 100644 opentmk/src/arch/aarch64/serial.rs diff --git a/opentmk/src/arch/aarch64/mod.rs b/opentmk/src/arch/aarch64/mod.rs index 594be8b42a..c9ab11a58c 100644 --- a/opentmk/src/arch/aarch64/mod.rs +++ b/opentmk/src/arch/aarch64/mod.rs @@ -1,2 +1,3 @@ +pub use minimal_rt::arch::aarch64::serial; + pub mod hypercall; -pub mod serial; \ No newline at end of file diff --git a/opentmk/src/arch/aarch64/serial.rs b/opentmk/src/arch/aarch64/serial.rs deleted file mode 100644 index f68e6cb200..0000000000 --- a/opentmk/src/arch/aarch64/serial.rs +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -//! aarch64 MMIO-based serial port, UART PL011. -//! -//! Used for debug output. Follows -//! [PrimeCell UART (PL011) Technical Reference Manual](https://developer.arm.com/documentation/ddi0183/g/) -//! -//! PL011 Registers: -//! -//! Offset Name Type Reset Bits Description -//! ---------------------------------------------------------------------- -//! 0x000 UARTDR RW 0x--- 12/8 Data Register -//! 0x004 UARTRSR/UARTECR RW 0x0 4/0 Receive Status Register/Error Clear Register -//! 0x018 UARTFR RO 0b-10010--- 9 Flag Register -//! 0x020 UARTILPR RW 0x00 8 IrDA Low-Power Counter Register -//! 0x024 UARTIBRD RW 0x0000 16 Integer Baud Rate Register -//! 0x028 UARTFBRD RW 0x00 6 Fractional Baud Rate Register -//! 
0x02C UARTLCR_H RW 0x00 8 Line Control Register -//! 0x030 UARTCR RW 0x0300 16 Control Register -//! 0x034 UARTIFLS RW 0x12 6 Interrupt FIFO Level Select Register -//! 0x038 UARTIMSC RW 0x000 11 Interrupt Mask Set/Clear Register -//! 0x03C UARTRIS RO 0x00- 11 Raw Interrupt Status Register -//! 0x040 UARTMIS RO 0x00- 11 Masked Interrupt Status Register -//! 0x044 UARTICR WO - 11 Interrupt Clear Register -//! 0x048 UARTDMACR RW 0x00 3 DMA Control Register -//! 0xFE0 UARTPeriphID0 RO 0x11 8 UARTPeriphID0 Register -//! 0xFE4 UARTPeriphID1 RO 0x10 8 UARTPeriphID1 Register -//! 0xFE8 UARTPeriphID2 RO 0x_4a 8 UARTPeriphID2 Register -//! 0xFEC UARTPeriphID3 RO 0x00 8 UARTPeriphID3 Register -//! 0xFF0 UARTPCellID0 RO 0x0D 8 UARTPCellID0 Register -//! 0xFF4 UARTPCellID1 RO 0xF0 8 UARTPCellID1 Register -//! 0xFF8 UARTPCellID2 RO 0x05 8 UARTPCellID2 Register -//! 0xFFC UARTPCellID3 RO 0xB1 8 UARTPCellID3 Register - -#![allow(dead_code)] - -use core::hint::spin_loop; -use core::sync::atomic::AtomicBool; -use core::sync::atomic::Ordering; - -#[derive(Debug, Clone, Copy)] -#[repr(u16)] -enum Pl011Register { - /// Data Register - Dr = 0x000, - /// Receive Status Register/Error Clear Register - RsrOrEcr = 0x004, - /// Flag register - Fr = 0x018, - /// Integer Baud Rate Register - Ibrd = 0x024, - /// Fractional Baud Rate Register - Fbrd = 0x028, - /// Line Control Register - LcrHigh = 0x02c, - /// Control Register - Cr = 0x030, - /// Masked Interrupt Status Register - Imsc = 0x038, - /// Interrupt Clear Register - Icr = 0x044, - /// DMA Control Register - DmaCr = 0x048, - /// UARTPeriphID0 Register - PeriphID0 = 0xFE0, - /// UARTPeriphID1 Register - PeriphID1 = 0xFE4, - /// UARTPeriphID2 Register - PeriphID2 = 0xFE8, - /// UARTPeriphID3 Register - PeriphID3 = 0xFEC, - /// UARTPCellID0 Register - PCellID0 = 0xFF0, - /// UARTPCellID1 Register - PCellID1 = 0xFF4, - /// UARTPCellID2 Register - PCellID2 = 0xFF8, - /// UARTPCellID3 Register - PCellID3 = 0xFFC, -} - -const CR_RX_ENABLE: u32 = 0x200; -const CR_TX_ENABLE: u32 = 0x100; -const CR_UART_ENABLE: u32 = 1; -const LCR_H_FIFO_EN: u32 = 0x10; -const LCR_H_8BITS: u32 = 0x60; - -const _FR_TX_EMPTY: u32 = 0x080; -const _FR_RX_FULL: u32 = 0x040; -const FR_TX_FULL: u32 = 0x020; -const _FR_RX_EMPTY: u32 = 0x010; -const FR_BUSY: u32 = 0x008; - -/// The Hyper-V PL011 host emulated PL011's are found at these -/// base addresses. Should come from ACPI or DT of course yet -/// due to having been hardcoded in some products makes that -/// virtually constants. -const PL011_HYPER_V_BASE_1: u64 = 0xeffec000; -const _PL011_HYPER_V_BASE_2: u64 = 0xeffeb000; -const PL011_BASE: u64 = PL011_HYPER_V_BASE_1; - -fn read_register(reg: Pl011Register) -> u32 { - // SAFETY: using the PL011 MMIO address. - unsafe { core::ptr::read_volatile((PL011_BASE + reg as u64) as *const u32) } -} - -fn write_register(reg: Pl011Register, val: u32) { - // SAFETY: using the PL011 MMIO address. - unsafe { - core::ptr::write_volatile((PL011_BASE + reg as u64) as *mut u32, val); - } -} - -fn cell_id() -> u32 { - // This can easily be rewritten employing - // bare arithmetic yet the compiler does a very good job - // so using the domain abstractions. 
- [ - Pl011Register::PCellID3, - Pl011Register::PCellID2, - Pl011Register::PCellID1, - Pl011Register::PCellID0, - ] - .iter() - .fold(0, |id_running, &r| { - id_running.wrapping_shl(8) | (read_register(r) as u8 as u32) - }) -} - -fn periph_id() -> u32 { - // This can easily be rewritten employing - // bare arithmetic yet the compiler does a very good job - // so using the domain abstractions. - [ - Pl011Register::PeriphID3, - Pl011Register::PeriphID2, - Pl011Register::PeriphID1, - Pl011Register::PeriphID0, - ] - .iter() - .fold(0, |id_running, &r| { - id_running.wrapping_shl(8) | (read_register(r) as u8 as u32) - }) -} - -fn poll_tx_not_full() { - while read_register(Pl011Register::Fr) & FR_TX_FULL != 0 { - spin_loop(); - } -} - -fn poll_not_busy() { - while read_register(Pl011Register::Fr) & FR_BUSY != 0 { - spin_loop(); - } -} - -/// Disables the functional parts of the UART, drains FIFOs, -/// sets baud rate and enables the UART in the polling mode. -/// Might be geared towards the real hardware more than the virtual one. -/// Works with qemu and Hyper-V. -fn reset_and_init() { - // Mask interrupts (lower 11 bits) - write_register(Pl011Register::Imsc, 0x7ff); - // Clear interrupts (lower 11 bits) - write_register(Pl011Register::Icr, 0x7ff); - // Disable DMA on Rx and Tx - write_register(Pl011Register::DmaCr, 0x0); - - // Leave Rx and Tx enabled to drain FIFOs. - write_register(Pl011Register::Cr, CR_RX_ENABLE | CR_TX_ENABLE); - read_register(Pl011Register::Cr); // wait - read_register(Pl011Register::Cr); // wait - poll_not_busy(); - - // Disable Rx, Tx, and UART. - write_register(Pl011Register::Cr, 0x00000000); - - // Set integer and fractional parts of the baud rate, - // hardcoded for now - write_register(Pl011Register::Fbrd, 0x00000004); - write_register(Pl011Register::Ibrd, 0x00000027); - // The UARTLCR_H, UARTIBRD, and UARTFBRD registers form the single 30-bit - // wide UARTLCR Register that is updated on a single write strobe generated by a - // UARTLCR_H write - write_register(Pl011Register::LcrHigh, LCR_H_FIFO_EN | LCR_H_8BITS); - - // Clear the errors - write_register(Pl011Register::RsrOrEcr, 0); - - // Enable Tx and Rx - write_register(Pl011Register::Cr, CR_RX_ENABLE | CR_TX_ENABLE); - read_register(Pl011Register::Cr); // wait - read_register(Pl011Register::Cr); // wait - poll_not_busy(); - - // Enable UART - write_register( - Pl011Register::Cr, - CR_RX_ENABLE | CR_TX_ENABLE | CR_UART_ENABLE, - ); - poll_not_busy(); -} - -/// A PL011 serial port. -pub struct Serial; - -static SUPPORTED: AtomicBool = AtomicBool::new(false); - -impl Serial { - /// Initializes the serial port. 
- pub fn init() -> Serial { - const SUPPORTED_PL011_CELLS: &[u32] = &[0xB105_F00D]; - - let cell_id = cell_id(); - let supported = SUPPORTED_PL011_CELLS.contains(&cell_id); - if supported { - reset_and_init(); - } - SUPPORTED.store(supported, Ordering::Relaxed); - - Self - } -} - -impl core::fmt::Write for Serial { - fn write_str(&mut self, s: &str) -> core::fmt::Result { - if !SUPPORTED.load(Ordering::Relaxed) { - return Ok(()); - } - - for byte in s.bytes() { - poll_tx_not_full(); - write_register(Pl011Register::Dr, byte.into()); - } - - Ok(()) - } -} diff --git a/opentmk/src/slog.rs b/opentmk/src/slog.rs index 2073788370..fcf15630a3 100644 --- a/opentmk/src/slog.rs +++ b/opentmk/src/slog.rs @@ -1,4 +1,5 @@ #![feature(panic_location)] +#[no_std] use core::any::type_name; use core::fmt::Write; @@ -7,7 +8,6 @@ use core::result; use crate::arch::serial::{InstrIoAccess, Serial}; use crate::sync::Mutex; use alloc::string::{String, ToString}; -#[no_std] use serde_json::json; use serde::Serialize; pub enum Level { diff --git a/opentmk/src/sync.rs b/opentmk/src/sync.rs index a26233bc76..08e4b105a6 100644 --- a/opentmk/src/sync.rs +++ b/opentmk/src/sync.rs @@ -2,167 +2,13 @@ use core::{arch::asm, cell::{RefCell, UnsafeCell}, fmt::Error, sync::atomic::{At pub use spin::Mutex; use alloc::{boxed::Box, string::{String, ToString}, sync::Arc, vec::Vec}; use alloc::collections::VecDeque; +#[cfg(feature = "std")] +use std::error::Error; +use core::fmt; use crate::infolog; -// pub struct LazyLock { -// lock: AtomicBool, -// init: fn() -> T, -// val: Option>, -// } - -// impl LazyLock { -// pub fn new(init: fn() -> T) -> Self { -// LazyLock { -// lock: AtomicBool::new(false), -// init, -// val: None, -// } -// } - -// pub fn get(&mut self) -> &T { -// if let ok = self.lock.get_mut() { -// if *ok { -// self.val = Some(RefCell::new((self.init)())); - -// } -// } -// if let Some(ref val) = self.val { -// return &val.borrow(); -// } -// panic!("LazyLock not initialized"); -// } - -// pub fn get_mut(&mut self) -> &mut T { -// if let ok = self.lock.get_mut() { -// if ok { -// self.val = Some((self.init)()); -// } -// } -// &mut self.val.unwrap() -// } -// } - -// pub struct Mutex { -// lock: AtomicBool, -// data: UnsafeCell, -// } - -// unsafe impl Sync for Mutex {} - -// impl Mutex { -// pub const fn new(data: T) -> Self { -// Mutex { -// lock: AtomicBool::new(false), -// data: UnsafeCell::new(data), -// } -// } - -// pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> { -// while self.lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() { -// // Busy-wait until the lock is acquired -// core::hint::spin_loop(); -// } -// MutexGuard { mutex: self } -// } - -// pub fn unlock(&self) { -// self.lock.store(false, Ordering::Release); -// } -// } - -// pub struct MutexGuard<'a, T> { -// mutex: &'a Mutex, -// } -// impl<'a, T> Drop for MutexGuard<'a, T> { -// fn drop(&mut self) { -// self.mutex.unlock(); -// } -// } - -// impl<'a, T> core::ops::Deref for MutexGuard<'a, T> { -// type Target = T; - -// fn deref(&self) -> &Self::Target { -// unsafe { &*self.mutex.data.get() } -// } -// } - -// impl<'a, T> core::ops::DerefMut for MutexGuard<'a, T> { -// fn deref_mut(&mut self) -> &mut Self::Target { -// unsafe { &mut *self.mutex.data.get() } -// } -// } - -#[derive(Debug)] -pub struct RingBuffer { - buffer: Vec>, - capacity: usize, - head: usize, - tail: usize, - size: usize, -} - -impl RingBuffer { - pub fn new(capacity: usize) -> Self { - RingBuffer { - buffer: Vec::with_capacity(capacity), - 
capacity, - head: 0, - tail: 0, - size: 0, - } - } - - fn is_empty(&self) -> bool { - self.size == 0 - } - - fn is_full(&self) -> bool { - self.size == self.capacity - } - - pub fn push(&mut self, item: T) -> Result<(), String> { - if self.is_full() { - return Err("Buffer is full".to_string()); - } - - if self.tail == self.buffer.len() { - self.buffer.push(Some(item)); - } else { - self.buffer[self.tail] = Some(item); - } - - self.tail = (self.tail + 1) % self.capacity; - self.size += 1; - - Ok(()) - } - - pub fn pop(&mut self) -> Option { - if self.is_empty() { - return None; - } - - let item = core::mem::replace(&mut self.buffer[self.head], None); - self.head = (self.head + 1) % self.capacity; - self.size -= 1; - - Some(item.unwrap()) - } - - pub fn len(&self) -> usize { - self.size - } -} - - - - -#[cfg(feature = "std")] -use std::error::Error; -use core::fmt; /// An unbounded channel implementation with priority send capability. /// This implementation works in no_std environments using spin-rs. diff --git a/opentmk/src/uefi/tests/hv_processor.rs b/opentmk/src/uefi/tests/hv_processor.rs index b71a346065..6839669694 100644 --- a/opentmk/src/uefi/tests/hv_processor.rs +++ b/opentmk/src/uefi/tests/hv_processor.rs @@ -5,8 +5,6 @@ use crate::{ criticallog, infolog, sync::{self, Mutex}, tmk_assert, uefi::context::{TestCtxTrait, VpExecutor} }; -static VP_RUNNING: Mutex> = Mutex::new(Vec::new()); - pub fn exec(ctx: &mut dyn TestCtxTrait) { ctx.setup_interrupt_handler(); ctx.setup_partition_vtl(Vtl::Vtl1); diff --git a/opentmk/src/uefi/tests/mod.rs b/opentmk/src/uefi/tests/mod.rs index 51686205e2..9f5a7be616 100644 --- a/opentmk/src/uefi/tests/mod.rs +++ b/opentmk/src/uefi/tests/mod.rs @@ -8,7 +8,5 @@ pub mod hv_misc; pub fn run_test() { let mut ctx = HvTestCtx::new(); ctx.init(); - - hv_processor::exec(&mut ctx); } \ No newline at end of file From 6d37a843cc3c077988433ec61ec801d850b0d4a4 Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Sun, 11 May 2025 19:51:31 +0000 Subject: [PATCH 05/10] chore: resolve PR feedback --- opentmk/Cargo.toml | 3 + opentmk/src/arch/x86_64/interrupt.rs | 15 +- .../arch/x86_64/interrupt_handler_register.rs | 50 ++- opentmk/src/arch/x86_64/serial.rs | 3 +- opentmk/src/main.rs | 8 +- opentmk/src/slog.rs | 242 ------------- opentmk/src/sync.rs | 13 +- opentmk/src/tests/hv_misc.rs | 145 ++++++++ opentmk/src/tests/hv_processor.rs | 74 ++++ opentmk/src/tests/mod.rs | 9 + opentmk/src/tmk_assert.rs | 115 ++++++ opentmk/src/tmk_logger.rs | 75 ++++ opentmk/src/uefi/alloc.rs | 55 +-- opentmk/src/uefi/context.rs | 33 +- opentmk/src/uefi/hypercall.rs | 333 +++++++++++------- opentmk/src/uefi/hypvctx.rs | 85 ++--- opentmk/src/uefi/init.rs | 25 +- opentmk/src/uefi/mod.rs | 16 +- opentmk/src/uefi/rt.rs | 8 +- opentmk/src/uefi/tests/hv_misc.rs | 45 +-- opentmk/src/uefi/tests/hv_processor.rs | 37 +- opentmk/src/uefi/tests/mod.rs | 2 - 22 files changed, 818 insertions(+), 573 deletions(-) delete mode 100644 opentmk/src/slog.rs create mode 100644 opentmk/src/tests/hv_misc.rs create mode 100644 opentmk/src/tests/hv_processor.rs create mode 100644 opentmk/src/tests/mod.rs create mode 100644 opentmk/src/tmk_assert.rs create mode 100644 opentmk/src/tmk_logger.rs diff --git a/opentmk/Cargo.toml b/opentmk/Cargo.toml index 752022b057..70f4c701c4 100644 --- a/opentmk/Cargo.toml +++ b/opentmk/Cargo.toml @@ -21,6 +21,9 @@ lazy_static = { version = "1.4.0", features = ["spin_no_std"] } serde_json = { version = "1.0", default-features = false, features = ["alloc"] } spin = "0.10.0" serde = 
{version = "1.0", default-features = false} +log = { version = "0.4", features = ["serde"] } +x86defs.workspace = true + [lints] workspace = true diff --git a/opentmk/src/arch/x86_64/interrupt.rs b/opentmk/src/arch/x86_64/interrupt.rs index d9a6ba7993..952629be30 100644 --- a/opentmk/src/arch/x86_64/interrupt.rs +++ b/opentmk/src/arch/x86_64/interrupt.rs @@ -1,14 +1,7 @@ - -use alloc::boxed::Box; -use alloc::sync::Arc; use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; use lazy_static::lazy_static; -use core::cell::{Ref, RefCell}; -use core::concat_idents; use crate::sync::Mutex; -use crate::{criticallog, infolog}; - use super::interrupt_handler_register::{register_interrupt_handler, set_common_handler}; lazy_static! { @@ -24,7 +17,7 @@ static mut HANDLERS : [fn(); 256] = [no_op; 256]; static MUTEX: Mutex<()> = Mutex::new(()); fn no_op() {} -fn common_handler(stack_frame: InterruptStackFrame, interrupt: u8) { +fn common_handler(_stack_frame: InterruptStackFrame, interrupt: u8) { unsafe { HANDLERS[interrupt as usize](); } } @@ -38,13 +31,13 @@ extern "x86-interrupt" fn handler_double_fault( stack_frame: InterruptStackFrame, _error_code: u64, ) -> ! { - criticallog!("EXCEPTION:\n\tERROR_CODE: {}\n\tDOUBLE FAULT\n{:#?}", _error_code, stack_frame); + log::error!("EXCEPTION:\n\tERROR_CODE: {}\n\tDOUBLE FAULT\n{:#?}", _error_code, stack_frame); loop {} } // Initialize the IDT pub fn init() { - unsafe { IDT.load() }; + IDT.load(); set_common_handler(common_handler); - unsafe { x86_64::instructions::interrupts::enable() }; + x86_64::instructions::interrupts::enable(); } \ No newline at end of file diff --git a/opentmk/src/arch/x86_64/interrupt_handler_register.rs b/opentmk/src/arch/x86_64/interrupt_handler_register.rs index d2433c780b..25522d3c38 100644 --- a/opentmk/src/arch/x86_64/interrupt_handler_register.rs +++ b/opentmk/src/arch/x86_64/interrupt_handler_register.rs @@ -1,7 +1,7 @@ -use core::arch::asm; +#![allow(dead_code)] use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; -use crate::{infolog, sync::Mutex}; +use crate::sync::Mutex; static mut COMMON_HANDLER: fn(InterruptStackFrame, u8) = common_handler; static COMMON_HANDLER_MUTEX: Mutex<()> = Mutex::new(()); @@ -20,28 +20,52 @@ macro_rules! 
register_interrupt_handler { }; } -fn common_handler(stack_frame: InterruptStackFrame, interrupt: u8) { - infolog!("Default interrupt handler fired: {}", interrupt); +fn common_handler(_stack_frame: InterruptStackFrame, interrupt: u8) { + log::info!("Default interrupt handler fired: {}", interrupt); } pub fn set_common_handler(handler: fn(InterruptStackFrame, u8)) { - let guard = COMMON_HANDLER_MUTEX.lock(); + let _guard = COMMON_HANDLER_MUTEX.lock(); unsafe { COMMON_HANDLER = handler; } } -extern "x86-interrupt" fn no_op(stack_frame: InterruptStackFrame) {} +extern "x86-interrupt" fn no_op(_stack_frame: InterruptStackFrame) {} pub fn register_interrupt_handler(idt: &mut InterruptDescriptorTable) { - register_interrupt_handler!(idt, 0, handler_0); - register_interrupt_handler!(idt, 1, handler_1); + register_interrupt_handler!(idt, x86defs::Exception::DIVIDE_ERROR.0, handler_0); + register_interrupt_handler!(idt, x86defs::Exception::DEBUG.0, handler_1); register_interrupt_handler!(idt, 2, handler_2); - register_interrupt_handler!(idt, 3, handler_3); - register_interrupt_handler!(idt, 4, handler_4); - register_interrupt_handler!(idt, 5, handler_5); - register_interrupt_handler!(idt, 6, handler_6); - register_interrupt_handler!(idt, 7, handler_7); + register_interrupt_handler!(idt, x86defs::Exception::BREAKPOINT.0, handler_3); + register_interrupt_handler!(idt, x86defs::Exception::OVERFLOW.0, handler_4); + register_interrupt_handler!(idt, x86defs::Exception::BOUND_RANGE_EXCEEDED.0, handler_5); + register_interrupt_handler!(idt, x86defs::Exception::INVALID_OPCODE.0, handler_6); + register_interrupt_handler!(idt, x86defs::Exception::DEVICE_NOT_AVAILABLE.0, handler_7); + // register_interrupt_handler!(idt, x86defs::Exception::DOUBLE_FAULT.0, handler_8); + register_interrupt_handler!(idt, 9, handler_9); + // register_interrupt_handler!(idt, x86defs::Exception::INVALID_TSS.0, handler_10); + // register_interrupt_handler!(idt, x86defs::Exception::SEGMENT_NOT_PRESENT.0, handler_11); + // register_interrupt_handler!(idt, x86defs::Exception::STACK_SEGMENT_FAULT.0, handler_12); + // register_interrupt_handler!(idt, x86defs::Exception::GENERAL_PROTECTION_FAULT.0, handler_13); + // register_interrupt_handler!(idt, x86defs::Exception::PAGE_FAULT.0, handler_14); + // register_interrupt_handler!(idt, 15, handler_15); + // register_interrupt_handler!(idt, x86defs::Exception::FLOATING_POINT_EXCEPTION.0, handler_16); + // register_interrupt_handler!(idt, x86defs::Exception::ALIGNMENT_CHECK.0, handler_17); + // register_interrupt_handler!(idt, x86defs::Exception::MACHINE_CHECK.0, handler_18); + // register_interrupt_handler!(idt, x86defs::Exception::SIMD_FLOATING_POINT_EXCEPTION.0, handler_19); + // register_interrupt_handler!(idt, 20, handler_20); + // register_interrupt_handler!(idt, 21, handler_21); + // register_interrupt_handler!(idt, 22, handler_22); + // register_interrupt_handler!(idt, 23, handler_23); + // register_interrupt_handler!(idt, 24, handler_24); + // register_interrupt_handler!(idt, 25, handler_25); + // register_interrupt_handler!(idt, 26, handler_26); + // register_interrupt_handler!(idt, 27, handler_27); + // register_interrupt_handler!(idt, 28, handler_28); + // register_interrupt_handler!(idt, x86defs::Exception::SEV_VMM_COMMUNICATION.0, handler_29); + // register_interrupt_handler!(idt, 30, handler_30); + // register_interrupt_handler!(idt, 31, handler_31); register_interrupt_handler!(idt, 32, handler_32); register_interrupt_handler!(idt, 33, handler_33); diff --git 
a/opentmk/src/arch/x86_64/serial.rs b/opentmk/src/arch/x86_64/serial.rs index 250fbd66cf..173794ad8f 100644 --- a/opentmk/src/arch/x86_64/serial.rs +++ b/opentmk/src/arch/x86_64/serial.rs @@ -2,10 +2,9 @@ // Licensed under the MIT License. //! Serial output for debugging. - +#![allow(static_mut_refs)] use core::arch::asm; use core::fmt; -use core::sync::atomic::AtomicBool; use crate::sync::Mutex; const COM4: u16 = 0x2E8; diff --git a/opentmk/src/main.rs b/opentmk/src/main.rs index 019f594f4d..d33c63c8af 100644 --- a/opentmk/src/main.rs +++ b/opentmk/src/main.rs @@ -1,11 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#![allow(warnings)] #![no_std] #![allow(unsafe_code)] -#![feature(naked_functions)] #![feature(abi_x86_interrupt)] -#![feature(concat_idents)] #![doc = include_str!("../README.md")] // HACK: workaround for building guest_test_uefi as part of the workspace in CI. @@ -23,5 +20,6 @@ extern crate alloc; mod uefi; pub mod arch; -pub mod slog; -pub mod sync; \ No newline at end of file +pub mod tmk_assert; +pub mod sync; +pub mod tmk_logger; diff --git a/opentmk/src/slog.rs b/opentmk/src/slog.rs deleted file mode 100644 index fcf15630a3..0000000000 --- a/opentmk/src/slog.rs +++ /dev/null @@ -1,242 +0,0 @@ -#![feature(panic_location)] -#[no_std] - -use core::any::type_name; -use core::fmt::Write; -use core::result; - -use crate::arch::serial::{InstrIoAccess, Serial}; -use crate::sync::Mutex; -use alloc::string::{String, ToString}; -use serde_json::json; -use serde::Serialize; -pub enum Level { - DEBUG = 0, - INFO = 1, - WARNING = 2, - ERROR = 3, - CRITICAL = 4, -} - -pub fn get_json_string(s: &String, terminate_new_line: bool, level: Level) -> String { - let out = json!({ - "type:": "log", - "message": s, - "level": match level { - Level::DEBUG => "DEBUG", - Level::INFO => "INFO", - Level::WARNING => "WARNING", - Level::ERROR => "ERROR", - Level::CRITICAL => "CRITICAL", - } - }); - let mut out = out.to_string(); - if terminate_new_line { - out.push('\n'); - } - return out; -} - -pub fn get_json_test_assertion_string( - s: &str, - terminate_new_line: bool, - line: String, - assert_result: bool, - testname: &T, -) -> String where T: Serialize { - let out = json!({ - "type:": "assertion", - "message": s, - "level": "CRITICAL", - "line": line, - "assertion_result": assert_result, - "testname": testname, - }); - let mut out = out.to_string(); - if terminate_new_line { - out.push('\n'); - } - return out; -} - -pub static mut SERIAL: Serial = Serial::new(InstrIoAccess {}); - -#[macro_export] -macro_rules! tmk_assert { - ($condition:expr, $message:expr) => {{ - use core::fmt::Write; - let file = core::file!(); - let line = line!(); - let file_line = format!("{}:{}", file, line); - let expn = stringify!($condition); - let result: bool = $condition; - let js = - crate::slog::get_json_test_assertion_string(&expn, true, file_line, result, &$message); - unsafe { crate::slog::SERIAL.write_str(&js) }; - if !result { - panic!("Assertion failed: {}", $message); - } - }}; -} - -#[macro_export] -macro_rules! logt { - ($($arg:tt)*) => { - { - use core::fmt::Write; - let message = format!($($arg)*); - let js = crate::slog::get_json_string(&message, true, crate::slog::Level::INFO); - unsafe { crate::slog::SERIAL.write_str(&js) }; - } - }; -} - -#[macro_export] -macro_rules! 
errorlog { - ($($arg:tt)*) => { - { - use core::fmt::Write; - let message = format!($($arg)*); - let js = crate::slog::get_json_string(&message, true, crate::slog::Level::ERROR); - unsafe { crate::slog::SERIAL.write_str(&js) }; - } - }; -} - -#[macro_export] -macro_rules! debuglog { - ($($arg:tt)*) => { - { - use core::fmt::Write; - - let message = format!($($arg)*); - let js = crate::slog::get_json_string(&message, true, crate::slog::Level::DEBUG); - unsafe { crate::slog::SERIAL.write_str(&js) }; - } - }; -} - -#[macro_export] -macro_rules! infolog { - ($($arg:tt)*) => { - { - use core::fmt::Write; - - let message = format!($($arg)*); - let js = crate::slog::get_json_string(&message, true, crate::slog::Level::INFO); - unsafe { crate::slog::SERIAL.write_str(&js) }; - } - }; -} - -#[macro_export] -macro_rules! warninglog { - ($($arg:tt)*) => { - { - use core::fmt::Write; - - let message = format!($($arg)*); - let js = crate::slog::get_json_string(&message, true, crate::slog::Level::WARNING); - unsafe { crate::slog::SERIAL.write_str(&js) }; - } - }; -} - -#[macro_export] -macro_rules! criticallog { - ($($arg:tt)*) => { - { - use core::fmt::Write; - - let message = format!($($arg)*); - let js = crate::slog::get_json_string(&message, true, crate::slog::Level::CRITICAL); - unsafe { crate::slog::SERIAL.write_str(&js) }; - } - }; -} - -#[macro_export] -macro_rules! slog { - - ($serial:expr, $($arg:tt)*) => { - let mut serial : &mut Mutex> = &mut $serial; - let message = format!($($arg)*); - let js = slog::get_json_string(&message, true, crate::slog::Level::INFO); - { - let mut serial = serial.lock(); - serial.write_str(&js); - } - }; - -} - -pub trait AssertResult { - fn unpack_assert(self) -> T; - fn expect_assert(self, message: &str) -> T; -} - -pub trait AssertOption { - fn expect_assert(self, message: &str) -> T; -} - -impl AssertOption for Option { - fn expect_assert(self, message: &str) -> T { - match self { - Some(value) => value, - None => { - let call: &core::panic::Location<'_> = core::panic::Location::caller(); - let file_line = format!("{}:{}", call.file(), call.line()); - let expn = type_name::>(); - let js = crate::slog::get_json_test_assertion_string( - expn, true, file_line, false, &message, - ); - unsafe { crate::slog::SERIAL.write_str(&js) }; - panic!("Assertion failed: {}", message); - } - } - } -} - -impl AssertResult for Result -where - E: core::fmt::Debug, -{ - fn unpack_assert(self) -> T { - match self { - Ok(value) => value, - Err(err) => { - let call: &core::panic::Location<'_> = core::panic::Location::caller(); - let file_line = format!("{}:{}", call.file(), call.line()); - let expn = type_name::>(); - let js = crate::slog::get_json_test_assertion_string( - expn, - true, - file_line, - false, - &"ResultTest", - ); - unsafe { crate::slog::SERIAL.write_str(&js) }; - panic!("Assertion failed: {:?}", err); - } - } - } - fn expect_assert(self, message: &str) -> T { - match self { - Ok(value) => { - infolog!("result is ok, condition not met for: {}", message); - value - } - Err(err) => { - let call: &core::panic::Location<'_> = core::panic::Location::caller(); - let file_line = format!("{}:{}", call.file(), call.line()); - let expn = type_name::>(); - let js = crate::slog::get_json_test_assertion_string( - expn, true, file_line, false, &message, - ); - unsafe { crate::slog::SERIAL.write_str(&js) }; - - panic!("Assertion failed: {:?}", err); - } - } - } -} diff --git a/opentmk/src/sync.rs b/opentmk/src/sync.rs index 08e4b105a6..d0fe9eba59 100644 --- a/opentmk/src/sync.rs +++ 
b/opentmk/src/sync.rs @@ -1,15 +1,10 @@ -use core::{arch::asm, cell::{RefCell, UnsafeCell}, fmt::Error, sync::atomic::{AtomicBool, AtomicUsize, Ordering}}; +use core::sync::atomic::{AtomicUsize, Ordering}; pub use spin::Mutex; -use alloc::{boxed::Box, string::{String, ToString}, sync::Arc, vec::Vec}; +use alloc::{sync::Arc, vec::Vec}; use alloc::collections::VecDeque; -#[cfg(feature = "std")] -use std::error::Error; +use core::error::Error; use core::fmt; -use crate::infolog; - - - /// An unbounded channel implementation with priority send capability. /// This implementation works in no_std environments using spin-rs. /// It uses a VecDeque as the underlying buffer and ensures non-blocking operations. @@ -47,7 +42,6 @@ impl fmt::Display for SendError { } } -#[cfg(feature = "std")] impl Error for SendError {} /// Error type for receiving operations @@ -68,7 +62,6 @@ impl fmt::Display for RecvError { } } -#[cfg(feature = "std")] impl Error for RecvError {} /// Sender half of the channel diff --git a/opentmk/src/tests/hv_misc.rs b/opentmk/src/tests/hv_misc.rs new file mode 100644 index 0000000000..8379c344a0 --- /dev/null +++ b/opentmk/src/tests/hv_misc.rs @@ -0,0 +1,145 @@ +// WIP : This test is not yet complete and is not expected to pass. +// +// This test is to verify that the VTL protections are working as expected. +// The stack values in VTL0 are changing after interrupt handling in VTL1. +#![allow(warnings)] +use crate::slog::{AssertOption, AssertResult}; +use crate::sync::{Channel, Receiver, Sender}; +use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB}; +use crate::uefi::{context, hypvctx}; +use crate::{infolog, tmk_assert}; +use ::alloc::boxed::Box; +use alloc::sync::Arc; +use ::alloc::vec::Vec; +use context::{TestCtxTrait, VpExecutor}; +use hypvctx::HvTestCtx; +use core::alloc::{GlobalAlloc, Layout}; +use core::arch::asm; +use core::cell::RefCell; +use core::ops::Range; +use core::sync::atomic::{AtomicI32, Ordering}; +use hvdef::hypercall::HvInputVtl; +use hvdef::{HvAllArchRegisterName, HvRegisterVsmVpStatus, HvX64RegisterName, Vtl}; + +static mut HEAPX: RefCell<*mut u8> = RefCell::new(0 as *mut u8); +static mut CON: AtomicI32 = AtomicI32::new(0); + +pub fn exec(_opt: Option<()>, ctx: Arc>) { + log::info!("ctx ptr: {:p}", &ctx as *const _); + + let mut cpy = ctx.clone(); + + let mut ctx = ctx.borrow_mut(); + + let mut vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count == 8, "vp count should be 8"); + + ctx.setup_interrupt_handler(); + + log::info!("set intercept handler successfully!"); + + ctx.setup_partition_vtl(Vtl::Vtl1); + + ctx.start_on_vp( + VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut dyn TestCtxTrait| { + log::info!("successfully started running VTL1 on vp0."); + ctx.setup_secure_intercept(0x30); + ctx.set_interupt_idx(0x30, || { + log::info!("interrupt fired!"); + + // let mut hv_test_ctx: HvTestCtx = HvTestCtx::new(); + // hv_test_ctx.init(); + + // let c = hv_test_ctx.get_register(HvAllArchRegisterName::VsmVpStatus.0); + + // let cp: HvRegisterVsmVpStatus = HvRegisterVsmVpStatus::from_bits(c as u64); + + // log::info!("VSM VP Status: {:?}", cp); + + log::info!("interrupt handled!"); + }); + + let layout = + Layout::from_size_align(SIZE_1MB, 4096).expect("msg: failed to create layout"); + let ptr = unsafe { ALLOCATOR.alloc(layout) }; + log::info!("allocated some memory in the heap from vtl1"); + unsafe { + let mut z = HEAPX.borrow_mut(); + *z = ptr; + *ptr.add(10) = 0xAA; + } + + let size = layout.size(); + ctx.setup_vtl_protection(); + + log::info!("enabled vtl 
protections for the partition."); + + let range = Range { + start: ptr as u64, + end: ptr as u64 + size as u64, + }; + + ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); + + log::info!("moving to vtl0 to attempt to read the heap memory"); + + ctx.switch_to_low_vtl(); + }), + ); + + ctx.queue_command_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx| { + log::info!("successfully started running VTL1 on vp0."); + ctx.switch_to_low_vtl(); + })); + log::info!("ctx ptr: {:p}", &ctx as *const _); + log::info!("_opt ptr: {:p}", &_opt as *const _); + let mut l = 0u64; + unsafe { asm!("mov {}, rsp", out(reg) l) }; + log::info!("rsp: 0x{:x}", l); + unsafe { + log::info!("Attempting to read heap memory from vtl0"); + let heapx = *HEAPX.borrow(); + let val = *(heapx.add(10)); + log::info!( + "reading mutated heap memory from vtl0(it should not be 0xAA): 0x{:x}", + val + ); + tmk_assert!( + val != 0xAA, + "heap memory should not be accessible from vtl0" + ); + } + + unsafe { asm!("mov {}, rsp", out(reg) l) }; + log::info!("rsp: 0x{:x}", l); + + // let (mut tx, mut rx) = Channel::new(1); + // { + // let mut tx = tx.clone(); + // ctx.start_on_vp(VpExecutor::new(2, Vtl::Vtl0).command( + // move |ctx: &mut dyn TestCtxTrait| { + // log::info!("Hello form vtl0 on vp2!"); + // tx.send(()); + // }, + // )); + // } + + drop(ctx); + + let mut ctx = cpy.borrow_mut(); + // let mut ctx = cpy.borrow_mut(); + log::info!("ctx ptr: {:p}", &ctx as *const _); + log::info!("opt ptr: {:p}", &_opt as *const _); + let c = ctx.get_vp_count(); + + tmk_assert!(c == 8, "vp count should be 8"); + + // rx.recv(); + + log::info!("we are in vtl0 now!"); + log::info!("we reached the end of the test"); + loop { + + } + +} \ No newline at end of file diff --git a/opentmk/src/tests/hv_processor.rs b/opentmk/src/tests/hv_processor.rs new file mode 100644 index 0000000000..64439039db --- /dev/null +++ b/opentmk/src/tests/hv_processor.rs @@ -0,0 +1,74 @@ +use hvdef::Vtl; + +use crate::{ + criticallog, infolog, + tmk_assert, + uefi::context::{TestCtxTrait, VpExecutor}, +}; + +pub fn exec(ctx: &mut dyn TestCtxTrait) { + ctx.setup_interrupt_handler(); + ctx.setup_partition_vtl(Vtl::Vtl1); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count == 8, "vp count should be 8"); + + // Testing BSP VTL Bringup + { + let (tx, rx) = crate::sync::Channel::new().split(); + ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command( + move |ctx: &mut dyn TestCtxTrait| { + let vp = ctx.get_current_vp(); + log::info!("vp: {}", vp); + tmk_assert!(vp == 0, "vp should be equal to 0"); + + let vtl = ctx.get_current_vtl(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); + _ = tx.send(()); + ctx.switch_to_low_vtl(); + }, + )); + _ = rx.recv(); + } + + for i in 1..vp_count { + // Testing VTL1 + { + let (tx, rx) = crate::sync::Channel::new().split(); + ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command( + move |ctx: &mut dyn TestCtxTrait| { + let vp = ctx.get_current_vp(); + log::info!("vp: {}", vp); + tmk_assert!(vp == i, format!("vp should be equal to {}", i)); + + let vtl = ctx.get_current_vtl(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl1 for VP {}", i)); + _ = tx.send(()); + }, + )); + _ = rx.recv(); + } + + // Testing VTL0 + { + let (tx, rx) = crate::sync::Channel::new().split(); + ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command( + move |ctx: &mut dyn TestCtxTrait| { + let vp = ctx.get_current_vp(); + log::info!("vp: {}", vp); + tmk_assert!(vp == 
i, format!("vp should be equal to {}", i)); + + let vtl = ctx.get_current_vtl(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl0, format!("vtl should be Vtl0 for VP {}", i)); + _ = tx.send(()); + }, + )); + _ = rx.recv(); + } + } + + log::error!("All VPs have been tested"); +} diff --git a/opentmk/src/tests/mod.rs b/opentmk/src/tests/mod.rs new file mode 100644 index 0000000000..2457eb039e --- /dev/null +++ b/opentmk/src/tests/mod.rs @@ -0,0 +1,9 @@ +mod hv_processor; +mod hv_misc; + +use crate::uefi::hypvctx::HvTestCtx; + +pub fn run_test() { + let mut ctx = HvTestCtx::new(); + hv_processor::exec(&mut ctx); +} \ No newline at end of file diff --git a/opentmk/src/tmk_assert.rs b/opentmk/src/tmk_assert.rs new file mode 100644 index 0000000000..a33e3ca447 --- /dev/null +++ b/opentmk/src/tmk_assert.rs @@ -0,0 +1,115 @@ +use core::{any::type_name, fmt::Write}; +use alloc::string::{String, ToString}; +use serde::Serialize; +use serde_json::json; + +pub fn format_asset_json_string( + s: &str, + terminate_new_line: bool, + line: String, + assert_result: bool, + testname: &T, +) -> String +where + T: Serialize, +{ + let out = json!({ + "type:": "assert", + "level": "WARN", + "message": s, + "line": line, + "assertion_result": assert_result, + "testname": testname, + }); + let mut out = out.to_string(); + if terminate_new_line { + out.push('\n'); + } + return out; +} + + +pub fn write_str(s: &str) { + let _ = crate::tmk_logger::LOGGER.get_writter().write_str(s); +} + +#[macro_export] +macro_rules! tmk_assert { + ($condition:expr, $message:expr) => { + { + let file = core::file!(); + let line = line!(); + let file_line = format!("{}:{}", file, line); + let expn = stringify!($condition); + let result: bool = $condition; + let js = crate::tmk_assert::format_asset_json_string( + &expn, true, file_line, result, &$message, + ); + crate::tmk_assert::write_str(&js); + if !result { + panic!("Assertion failed: {}", $message); + } + } + }; +} + +pub trait AssertResult { + fn unpack_assert(self) -> T; + fn expect_assert(self, message: &str) -> T; +} + +pub trait AssertOption { + fn expect_assert(self, message: &str) -> T; +} + +impl AssertOption for Option { + fn expect_assert(self, message: &str) -> T { + match self { + Some(value) => value, + None => { + let call: &core::panic::Location<'_> = core::panic::Location::caller(); + let file_line = format!("{}:{}", call.file(), call.line()); + let expn = type_name::>(); + let js = format_asset_json_string(expn, true, file_line, false, &message); + write_str(&js); + panic!("Assertion failed: {}", message); + } + } + } +} + +impl AssertResult for Result +where + E: core::fmt::Debug, +{ + fn unpack_assert(self) -> T { + match self { + Ok(value) => value, + Err(err) => { + let call: &core::panic::Location<'_> = core::panic::Location::caller(); + let file_line = format!("{}:{}", call.file(), call.line()); + let expn = type_name::>(); + let js = + format_asset_json_string(expn, true, file_line, false, &"ResultTest"); + write_str(&js); + panic!("Assertion failed: {:?}", err); + } + } + } + fn expect_assert(self, message: &str) -> T { + match self { + Ok(value) => { + log::info!("result is ok, condition not met for: {}", message); + value + } + Err(err) => { + let call: &core::panic::Location<'_> = core::panic::Location::caller(); + let file_line = format!("{}:{}", call.file(), call.line()); + let expn = type_name::>(); + let js = format_asset_json_string(expn, true, file_line, false, &message); + write_str(&js); + panic!("Assertion failed: {:?}", err); + } 
+ } + } +} diff --git a/opentmk/src/tmk_logger.rs b/opentmk/src/tmk_logger.rs new file mode 100644 index 0000000000..467d775c49 --- /dev/null +++ b/opentmk/src/tmk_logger.rs @@ -0,0 +1,75 @@ +use core::fmt::Write; + +use alloc::{fmt::format, string::{String, ToString}}; +use log::SetLoggerError; +use serde_json::json; +use spin::{mutex::Mutex, MutexGuard}; + +use crate::arch::serial::{InstrIoAccess, Serial}; + +pub fn format_log_string_to_json( + message: &String, + line: &String, + terminate_new_line: bool, + level: log::Level, +) -> String { + let out = json!({ + "type:": "log", + "level": level.as_str(), + "message": message, + "line": line, + }); + let mut out = out.to_string(); + if terminate_new_line { + out.push('\n'); + } + return out; +} + +pub struct TmkLogger { + pub writter: T, +} + +impl TmkLogger> +where + T: Write + Send, +{ + pub const fn new(provider: T) -> Self { + TmkLogger { + writter: Mutex::new(provider), + } + } + + pub fn get_writter(&self) -> MutexGuard<'_, T> where T: Write + Send { + self.writter.lock() + } +} + +impl log::Log for TmkLogger> +where + T: Write + Send, +{ + fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { + true + } + + fn log(&self, record: &log::Record<'_>) { + let str = format(*record.args()); + let line = format!( + "{}:{}", + record.file().unwrap_or_default(), + record.line().unwrap_or_default() + ); + let str = format_log_string_to_json(&str, &line, true, record.level()); + _ = self.writter.lock().write_str(str.as_str()); + } + + fn flush(&self) {} +} + +pub static LOGGER: TmkLogger>> = + TmkLogger::new(Serial::new(InstrIoAccess {})); + +pub fn init() -> Result<(), SetLoggerError> { + log::set_logger(&LOGGER).map(|()| log::set_max_level(log::LevelFilter::Debug)) +} diff --git a/opentmk/src/uefi/alloc.rs b/opentmk/src/uefi/alloc.rs index 92619fdee5..edc36d054d 100644 --- a/opentmk/src/uefi/alloc.rs +++ b/opentmk/src/uefi/alloc.rs @@ -2,15 +2,19 @@ use core::{alloc::GlobalAlloc, cell::RefCell}; use linked_list_allocator::LockedHeap; use spin::mutex::Mutex; -use uefi::{allocator::Allocator, boot::{self, AllocateType, MemoryType}}; +use uefi::{ + allocator::Allocator, + boot::{self, AllocateType, MemoryType}, +}; -pub const SIZE_1MB: usize = 1024 * 1024; +pub const SIZE_1MB: usize = 1024 * 1024; +const PAGE_SIZE: usize = 4096; #[global_allocator] pub static ALLOCATOR: MemoryAllocator = MemoryAllocator { use_locked_heap: Mutex::new(RefCell::new(false)), locked_heap: LockedHeap::empty(), - uefi_allocator: Allocator{}, + uefi_allocator: Allocator {}, }; pub struct MemoryAllocator { @@ -24,7 +28,7 @@ unsafe impl GlobalAlloc for MemoryAllocator { #[allow(unsafe_code)] unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { if *self.use_locked_heap.lock().borrow() { - unsafe { self.locked_heap.alloc(layout) } + unsafe { self.locked_heap.alloc(layout) } } else { unsafe { self.uefi_allocator.alloc(layout) } } @@ -37,31 +41,38 @@ unsafe impl GlobalAlloc for MemoryAllocator { unsafe { self.uefi_allocator.dealloc(ptr, layout) } } } - + unsafe fn alloc_zeroed(&self, layout: core::alloc::Layout) -> *mut u8 { if *self.use_locked_heap.lock().borrow() { unsafe { self.locked_heap.alloc_zeroed(layout) } - } else { - unsafe { self.uefi_allocator.alloc_zeroed(layout) } - } + } else { + unsafe { self.uefi_allocator.alloc_zeroed(layout) } + } } - - unsafe fn realloc(&self, ptr: *mut u8, layout: core::alloc::Layout, new_size: usize) -> *mut u8 { + + unsafe fn realloc( + &self, + ptr: *mut u8, + layout: core::alloc::Layout, + new_size: usize, + ) -> 
*mut u8 { if *self.use_locked_heap.lock().borrow() { unsafe { self.locked_heap.realloc(ptr, layout, new_size) } - } else { - unsafe { self.uefi_allocator.realloc(ptr, layout, new_size) } - } + } else { + unsafe { self.uefi_allocator.realloc(ptr, layout, new_size) } + } } } impl MemoryAllocator { - - #[expect(unsafe_code)] - pub unsafe fn init(&self, size: usize) -> bool { + pub fn init(&self, size: usize) -> bool { let pages = ((SIZE_1MB * size) / 4096) + 1; let size = pages * 4096; - let mem: Result, uefi::Error> = boot::allocate_pages(AllocateType::AnyPages, MemoryType::BOOT_SERVICES_DATA, pages); + let mem: Result, uefi::Error> = boot::allocate_pages( + AllocateType::AnyPages, + MemoryType::BOOT_SERVICES_DATA, + pages, + ); if mem.is_err() { return false; } @@ -73,10 +84,14 @@ impl MemoryAllocator { return true; } + #[allow(dead_code)] pub fn get_page_alligned_memory(&self, size: usize) -> *mut u8 { - let pages = ((SIZE_1MB * size) / 4096) + 1; - let size = pages * 4096; - let mem: Result, uefi::Error> = boot::allocate_pages(AllocateType::AnyPages, MemoryType::BOOT_SERVICES_DATA, pages); + let pages = ((SIZE_1MB * size) / PAGE_SIZE) + 1; + let mem: Result, uefi::Error> = boot::allocate_pages( + AllocateType::AnyPages, + MemoryType::BOOT_SERVICES_DATA, + pages, + ); if mem.is_err() { return core::ptr::null_mut(); } diff --git a/opentmk/src/uefi/context.rs b/opentmk/src/uefi/context.rs index b9e5da65d6..3249c0bd3f 100644 --- a/opentmk/src/uefi/context.rs +++ b/opentmk/src/uefi/context.rs @@ -1,3 +1,4 @@ +#![allow(dead_code)] use core::ops::Range; use alloc::boxed::Box; @@ -6,32 +7,30 @@ use hvdef::Vtl; pub trait TestCtxTrait { - fn get_vp_count(&self) -> u32; - fn get_current_vp(&self) -> u32; - fn get_current_vtl(&self) -> Vtl; - - fn start_on_vp(&mut self, cmd: VpExecutor); - - fn queue_command_vp(&mut self, cmd: VpExecutor); - - fn switch_to_high_vtl(&mut self); - fn switch_to_low_vtl(&mut self); + // partition wide Traits + fn get_vp_count(&self) -> u32; + fn setup_vtl_protection(&mut self); fn setup_partition_vtl(&mut self, vtl: Vtl); fn setup_interrupt_handler(&mut self); - fn set_interupt_idx(&mut self, interrupt_idx: u8, handler: fn()); - - fn setup_vtl_protection(&mut self); + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()); + fn start_on_vp(&mut self, cmd: VpExecutor); + fn queue_command_vp(&mut self, cmd: VpExecutor); fn setup_secure_intercept(&mut self, interrupt_idx: u8); fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl); - fn write_msr(&mut self, msr: u32, value: u64); - fn read_msr(&mut self, msr: u32) -> u64; - - fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor); fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl); + fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor); fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl); + fn write_msr(&mut self, msr: u32, value: u64); + fn read_msr(&mut self, msr: u32) -> u64; + // per vp wide Traits + fn get_current_vp(&self) -> u32; + fn get_current_vtl(&self) -> Vtl; + fn switch_to_high_vtl(&mut self); + fn switch_to_low_vtl(&mut self); fn get_register(&mut self, reg: u32) -> u128; + } pub struct VpExecutor { diff --git a/opentmk/src/uefi/hypercall.rs b/opentmk/src/uefi/hypercall.rs index 8e3aa61312..26aa18db9a 100644 --- a/opentmk/src/uefi/hypercall.rs +++ b/opentmk/src/uefi/hypercall.rs @@ -3,32 +3,26 @@ //! Hypercall infrastructure. 
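Stepping back to the allocator change above: `MemoryAllocator` starts out forwarding to the UEFI allocator and only switches to the linked-list heap once `init` has carved pages out of boot-services memory. A minimal sketch of the intended call order follows; the 64 MB figure and the `setup_runtime_heap` wrapper are assumptions, not taken from the patch:

```rust
use crate::uefi::alloc::ALLOCATOR;

// Hypothetical init-path sketch. `init` takes a size in MB (see the SIZE_1MB
// arithmetic above): it reserves BOOT_SERVICES_DATA pages, seeds the
// linked-list heap with them, and flips the allocator over to that heap.
fn setup_runtime_heap() {
    let ok = ALLOCATOR.init(64);
    assert!(ok, "failed to reserve the runtime heap");
    // From here on, alloc/dealloc go through the linked-list heap rather than
    // the UEFI allocator.
}
```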
+#![allow(dead_code)] use arrayvec::ArrayVec; +use core::mem::size_of; use hvdef::hypercall::EnablePartitionVtlFlags; +use hvdef::hypercall::HvInputVtl; use hvdef::hypercall::InitialVpContextX64; -use hvdef::HvInterruptType; -use hvdef::HvRegisterGuestVsmPartitionConfig; use hvdef::HvRegisterValue; use hvdef::HvRegisterVsmPartitionConfig; use hvdef::HvX64RegisterName; -use zerocopy::FromZeros; -use core::arch; -use core::cell::RefCell; -use core::cell::UnsafeCell; -use core::mem::size_of; -use hvdef::hypercall::HvInputVtl; use hvdef::Vtl; use hvdef::HV_PAGE_SIZE; use memory_range::MemoryRange; use minimal_rt::arch::hypercall::{invoke_hypercall, HYPERCALL_PAGE}; -use zerocopy::IntoBytes; use zerocopy::FromBytes; +use zerocopy::IntoBytes; /// Page-aligned, page-sized buffer for use with hypercalls #[repr(C, align(4096))] struct HvcallPage { buffer: [u8; HV_PAGE_SIZE as usize], - } pub fn invoke_hypercall_vtl(control: hvdef::hypercall::Control) { @@ -48,7 +42,6 @@ impl HvcallPage { pub const fn new() -> Self { HvcallPage { buffer: [0; HV_PAGE_SIZE as usize], - } } @@ -64,7 +57,7 @@ impl HvcallPage { } /// Provides mechanisms to invoke hypercalls within the boot shim. -/// +/// /// This module defines the `HvCall` struct and associated methods to interact with /// hypervisor functionalities through hypercalls. It includes utilities for managing /// hypercall pages, setting and getting virtual processor (VP) registers, enabling @@ -122,25 +115,18 @@ impl HvcallPage { /// multi-threaded capacity (which the boot shim currently is not). pub struct HvCall { initialized: bool, - pub vtl: Vtl, input_page: HvcallPage, output_page: HvcallPage, } - #[expect(unsafe_code)] impl HvCall { pub const fn new() -> Self { - // SAFETY: The caller must ensure that this is only called once. - unsafe { - HvCall { - initialized: false, - vtl: Vtl::Vtl0, - input_page: HvcallPage::new(), - output_page: HvcallPage::new(), - } + HvCall { + initialized: false, + input_page: HvcallPage::new(), + output_page: HvcallPage::new(), } - } fn input_page(&mut self) -> &mut HvcallPage { &mut self.input_page @@ -155,7 +141,7 @@ impl HvCall { #[cfg(target_arch = "x86_64")] pub fn hypercall_page(&mut self) -> u64 { self.init_if_needed(); - core::ptr::addr_of!(minimal_rt::arch::hypercall::HYPERCALL_PAGE) as u64 + core::ptr::addr_of!(HYPERCALL_PAGE) as u64 } fn init_if_needed(&mut self) { @@ -171,15 +157,6 @@ impl HvCall { let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1); crate::arch::hypercall::initialize(guest_os_id.into()); self.initialized = true; - - self.vtl = self - .get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None) - .map_or(Vtl::Vtl0, |status| { - hvdef::HvRegisterVsmVpStatus::from(status.as_u64()) - .active_vtl() - .try_into() - .unwrap() - }); } /// Call before jumping to kernel. @@ -191,9 +168,16 @@ impl HvCall { } /// Returns the environment's VTL. - pub fn vtl(&self) -> Vtl { + pub fn vtl(&mut self) -> Vtl { assert!(self.initialized); - self.vtl + self + .get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None) + .map_or(Vtl::Vtl0, |status| { + hvdef::HvRegisterVsmVpStatus::from(status.as_u64()) + .active_vtl() + .try_into() + .unwrap() + }) } /// Makes a hypercall. 
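With the cached `vtl` field gone, `vtl()` now queries the hypervisor each time it is called. The decode it performs is small enough to spell out; in the sketch below, `raw` stands in for the value returned by `get_register(HvAllArchRegisterName::VsmVpStatus, None)`, and `active_vtl_from_status` is an illustrative helper, not an API added by this patch:

```rust
// Sketch of the decode inside HvCall::vtl() above.
fn active_vtl_from_status(raw: u64) -> hvdef::Vtl {
    let status = hvdef::HvRegisterVsmVpStatus::from(raw);
    status
        .active_vtl()
        .try_into()
        .expect("hypervisor reported an out-of-range active VTL")
}
```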
@@ -219,12 +203,11 @@ impl HvCall { } } - pub fn set_vp_registers( &mut self, vp: u32, vtl: Option, - vp_context : Option, + vp_context: Option, ) -> Result<(), hvdef::HvError> { const HEADER_SIZE: usize = size_of::(); @@ -235,55 +218,105 @@ impl HvCall { rsvd: [0; 3], }; - header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); let mut input_offset = HEADER_SIZE; let mut count = 0; - let mut write_reg = |reg_name: hvdef::HvRegisterName, reg_value: hvdef::HvRegisterValue| { + let mut write_reg = |reg_name: hvdef::HvRegisterName, reg_value: HvRegisterValue| { let reg = hvdef::hypercall::HvRegisterAssoc { name: reg_name, pad: Default::default(), value: reg_value, }; - reg.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + let _ = reg.write_to_prefix(&mut self.input_page().buffer[input_offset..]); input_offset += size_of::(); count += 1; }; // pub msr_cr_pat: u64, - write_reg(hvdef::HvX64RegisterName::Cr0.into(), vp_context.unwrap().cr0.into()); - write_reg(hvdef::HvX64RegisterName::Cr3.into(), vp_context.unwrap().cr3.into()); - write_reg(hvdef::HvX64RegisterName::Cr4.into(), vp_context.unwrap().cr4.into()); - write_reg(hvdef::HvX64RegisterName::Rip.into(), vp_context.unwrap().rip.into()); - write_reg(hvdef::HvX64RegisterName::Rsp.into(), vp_context.unwrap().rsp.into()); - write_reg(hvdef::HvX64RegisterName::Rflags.into(), vp_context.unwrap().rflags.into()); - write_reg(hvdef::HvX64RegisterName::Cs.into(), vp_context.unwrap().cs.into()); - write_reg(hvdef::HvX64RegisterName::Ss.into(), vp_context.unwrap().ss.into()); - write_reg(hvdef::HvX64RegisterName::Ds.into(), vp_context.unwrap().ds.into()); - write_reg(hvdef::HvX64RegisterName::Es.into(), vp_context.unwrap().es.into()); - write_reg(hvdef::HvX64RegisterName::Fs.into(), vp_context.unwrap().fs.into()); - write_reg(hvdef::HvX64RegisterName::Gs.into(), vp_context.unwrap().gs.into()); - write_reg(hvdef::HvX64RegisterName::Gdtr.into(), vp_context.unwrap().gdtr.into()); - write_reg(hvdef::HvX64RegisterName::Idtr.into(), vp_context.unwrap().idtr.into()); - write_reg(hvdef::HvX64RegisterName::Ldtr.into(), vp_context.unwrap().ldtr.into()); - write_reg(hvdef::HvX64RegisterName::Tr.into(), vp_context.unwrap().tr.into()); - write_reg(hvdef::HvX64RegisterName::Efer.into(), vp_context.unwrap().efer.into()); + write_reg( + HvX64RegisterName::Cr0.into(), + vp_context.unwrap().cr0.into(), + ); + write_reg( + HvX64RegisterName::Cr3.into(), + vp_context.unwrap().cr3.into(), + ); + write_reg( + HvX64RegisterName::Cr4.into(), + vp_context.unwrap().cr4.into(), + ); + write_reg( + HvX64RegisterName::Rip.into(), + vp_context.unwrap().rip.into(), + ); + write_reg( + HvX64RegisterName::Rsp.into(), + vp_context.unwrap().rsp.into(), + ); + write_reg( + HvX64RegisterName::Rflags.into(), + vp_context.unwrap().rflags.into(), + ); + write_reg( + HvX64RegisterName::Cs.into(), + vp_context.unwrap().cs.into(), + ); + write_reg( + HvX64RegisterName::Ss.into(), + vp_context.unwrap().ss.into(), + ); + write_reg( + HvX64RegisterName::Ds.into(), + vp_context.unwrap().ds.into(), + ); + write_reg( + HvX64RegisterName::Es.into(), + vp_context.unwrap().es.into(), + ); + write_reg( + HvX64RegisterName::Fs.into(), + vp_context.unwrap().fs.into(), + ); + write_reg( + HvX64RegisterName::Gs.into(), + vp_context.unwrap().gs.into(), + ); + write_reg( + HvX64RegisterName::Gdtr.into(), + vp_context.unwrap().gdtr.into(), + ); + write_reg( + HvX64RegisterName::Idtr.into(), + 
vp_context.unwrap().idtr.into(), + ); + write_reg( + HvX64RegisterName::Ldtr.into(), + vp_context.unwrap().ldtr.into(), + ); + write_reg( + HvX64RegisterName::Tr.into(), + vp_context.unwrap().tr.into(), + ); + write_reg( + HvX64RegisterName::Efer.into(), + vp_context.unwrap().efer.into(), + ); let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(count)); output.result() } - /// Hypercall for setting a register to a value. pub fn set_register( &mut self, name: hvdef::HvRegisterName, - value: hvdef::HvRegisterValue, - vtl: Option + value: HvRegisterValue, + vtl: Option, ) -> Result<(), hvdef::HvError> { const HEADER_SIZE: usize = size_of::(); @@ -294,7 +327,7 @@ impl HvCall { rsvd: [0; 3], }; - header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); let reg = hvdef::hypercall::HvRegisterAssoc { name, @@ -302,7 +335,7 @@ impl HvCall { value, }; - reg.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + let _ = reg.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(1)); @@ -314,7 +347,7 @@ impl HvCall { &mut self, name: hvdef::HvRegisterName, vtl: Option, - ) -> Result { + ) -> Result { const HEADER_SIZE: usize = size_of::(); let header = hvdef::hypercall::GetSetVpRegisters { @@ -324,12 +357,12 @@ impl HvCall { rsvd: [0; 3], }; - header.write_to_prefix(self.input_page().buffer.as_mut_slice()); - name.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = name.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallGetVpRegisters, Some(1)); output.result()?; - let value = hvdef::HvRegisterValue::read_from_prefix(&self.output_page().buffer).unwrap(); + let value = HvRegisterValue::read_from_prefix(&self.output_page().buffer).unwrap(); Ok(value.0) } @@ -352,12 +385,12 @@ impl HvCall { let remaining_pages = range.end_4k_gpn() - current_page; let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); - header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); let mut input_offset = HEADER_SIZE; for i in 0..count { let page_num = current_page + i; - page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + let _ = page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); input_offset += size_of::(); } @@ -376,7 +409,11 @@ impl HvCall { /// Hypercall to apply vtl protections to the pages from address start to end #[cfg_attr(target_arch = "x86_64", allow(dead_code))] - pub fn apply_vtl_protections(&mut self, range: MemoryRange, vtl: Vtl) -> Result<(), hvdef::HvError> { + pub fn apply_vtl_protections( + &mut self, + range: MemoryRange, + vtl: Vtl, + ) -> Result<(), hvdef::HvError> { const HEADER_SIZE: usize = size_of::(); const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); @@ -384,8 +421,8 @@ impl HvCall { partition_id: hvdef::HV_PARTITION_ID_SELF, map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE, target_vtl: HvInputVtl::new() - .with_target_vtl_value(vtl.into()) - .with_use_target_vtl(true), + .with_target_vtl_value(vtl.into()) + .with_use_target_vtl(true), reserved: [0; 3], }; @@ -394,12 +431,12 @@ impl HvCall { let remaining_pages = range.end_4k_gpn() - current_page; let count = 
remaining_pages.min(MAX_INPUT_ELEMENTS as u64); - header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); let mut input_offset = HEADER_SIZE; for i in 0..count { let page_num = current_page + i; - page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + let _ = page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); input_offset += size_of::(); } @@ -416,29 +453,60 @@ impl HvCall { Ok(()) } - #[cfg(target_arch = "x86_64")] /// Hypercall to get the current VTL VP context pub fn get_current_vtl_vp_context(&mut self) -> Result { - use hvdef::HvX64RegisterName; + use HvX64RegisterName; use zerocopy::FromZeros; - let mut context :InitialVpContextX64 = FromZeros::new_zeroed(); - context.cr0 = self.get_register(HvX64RegisterName::Cr0.into(), None)?.as_u64(); - context.cr3 = self.get_register(HvX64RegisterName::Cr3.into(), None)?.as_u64(); - context.cr4 = self.get_register(HvX64RegisterName::Cr4.into(), None)?.as_u64(); - context.rip = self.get_register(HvX64RegisterName::Rip.into(), None)?.as_u64(); - context.rsp = self.get_register(HvX64RegisterName::Rsp.into(), None)?.as_u64(); - context.rflags = self.get_register(HvX64RegisterName::Rflags.into(), None)?.as_u64(); - context.cs = self.get_register(HvX64RegisterName::Cs.into(), None)?.as_segment(); - context.ss = self.get_register(HvX64RegisterName::Ss.into(), None)?.as_segment(); - context.ds = self.get_register(HvX64RegisterName::Ds.into(), None)?.as_segment(); - context.es = self.get_register(HvX64RegisterName::Es.into(), None)?.as_segment(); - context.fs = self.get_register(HvX64RegisterName::Fs.into(), None)?.as_segment(); - context.gs = self.get_register(HvX64RegisterName::Gs.into(), None)?.as_segment(); - context.gdtr = self.get_register(HvX64RegisterName::Gdtr.into(), None)?.as_table(); - context.idtr = self.get_register(HvX64RegisterName::Idtr.into(), None)?.as_table(); - context.tr = self.get_register(HvX64RegisterName::Tr.into(), None)?.as_segment(); - context.efer = self.get_register(HvX64RegisterName::Efer.into(), None)?.as_u64(); + let mut context: InitialVpContextX64 = FromZeros::new_zeroed(); + context.cr0 = self + .get_register(HvX64RegisterName::Cr0.into(), None)? + .as_u64(); + context.cr3 = self + .get_register(HvX64RegisterName::Cr3.into(), None)? + .as_u64(); + context.cr4 = self + .get_register(HvX64RegisterName::Cr4.into(), None)? + .as_u64(); + context.rip = self + .get_register(HvX64RegisterName::Rip.into(), None)? + .as_u64(); + context.rsp = self + .get_register(HvX64RegisterName::Rsp.into(), None)? + .as_u64(); + context.rflags = self + .get_register(HvX64RegisterName::Rflags.into(), None)? + .as_u64(); + context.cs = self + .get_register(HvX64RegisterName::Cs.into(), None)? + .as_segment(); + context.ss = self + .get_register(HvX64RegisterName::Ss.into(), None)? + .as_segment(); + context.ds = self + .get_register(HvX64RegisterName::Ds.into(), None)? + .as_segment(); + context.es = self + .get_register(HvX64RegisterName::Es.into(), None)? + .as_segment(); + context.fs = self + .get_register(HvX64RegisterName::Fs.into(), None)? + .as_segment(); + context.gs = self + .get_register(HvX64RegisterName::Gs.into(), None)? + .as_segment(); + context.gdtr = self + .get_register(HvX64RegisterName::Gdtr.into(), None)? + .as_table(); + context.idtr = self + .get_register(HvX64RegisterName::Idtr.into(), None)? + .as_table(); + context.tr = self + .get_register(HvX64RegisterName::Tr.into(), None)? 
+ .as_segment(); + context.efer = self + .get_register(HvX64RegisterName::Efer.into(), None)? + .as_u64(); Ok(context) } @@ -446,48 +514,57 @@ impl HvCall { let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() .with_code(hvdef::HypercallCode::HvCallVtlCall.0) .with_rep_count(0); - - // SAFETY: Invoking hypercall per TLFS spec - unsafe { - invoke_hypercall_vtl( - control, - ); - } + invoke_hypercall_vtl(control); } pub fn vtl_return() { let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() .with_code(hvdef::HypercallCode::HvCallVtlReturn.0) .with_rep_count(0); - // SAFETY: Invoking hypercall per TLFS spec - unsafe { - invoke_hypercall_vtl(control); - } + invoke_hypercall_vtl(control); } - pub fn enable_vtl_protection(&mut self, vp_index: u32, vtl: HvInputVtl) -> Result<(), hvdef::HvError> { - let hvreg = self.get_register(hvdef::HvX64RegisterName::VsmPartitionConfig.into(), Some(vtl))?; - let mut hvreg: HvRegisterVsmPartitionConfig = HvRegisterVsmPartitionConfig::from_bits(hvreg.as_u64()); + pub fn enable_vtl_protection( + &mut self, + vtl: HvInputVtl, + ) -> Result<(), hvdef::HvError> { + let hvreg = self.get_register( + HvX64RegisterName::VsmPartitionConfig.into(), + Some(vtl), + )?; + let mut hvreg: HvRegisterVsmPartitionConfig = + HvRegisterVsmPartitionConfig::from_bits(hvreg.as_u64()); hvreg.set_enable_vtl_protection(true); // hvreg.set_intercept_page(true); // hvreg.set_default_vtl_protection_mask(0b11); // hvreg.set_intercept_enable_vtl_protection(true); let bits = hvreg.into_bits(); - let hvre: HvRegisterValue = hvdef::HvRegisterValue::from(bits); - self.set_register(HvX64RegisterName::VsmPartitionConfig.into(), hvre, Some(vtl)) + let hvre: HvRegisterValue = HvRegisterValue::from(bits); + self.set_register( + HvX64RegisterName::VsmPartitionConfig.into(), + hvre, + Some(vtl), + ) } #[cfg(target_arch = "x86_64")] - pub fn enable_vp_vtl(&mut self, vp_index: u32, target_vtl : Vtl, vp_context : Option) -> Result<(), hvdef::HvError> { + pub fn enable_vp_vtl( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { let header = hvdef::hypercall::EnableVpVtlX64 { partition_id: hvdef::HV_PARTITION_ID_SELF, vp_index, target_vtl: target_vtl.into(), reserved: [0; 3], - vp_vtl_context: vp_context.unwrap_or( zerocopy::FromZeros::new_zeroed()), + vp_vtl_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), }; - header.write_to_prefix(self.input_page().buffer.as_mut_slice()).expect("size of enable_vp_vtl header is not correct"); + header + .write_to_prefix(self.input_page().buffer.as_mut_slice()) + .expect("size of enable_vp_vtl header is not correct"); let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None); match output.result() { @@ -497,17 +574,24 @@ impl HvCall { } #[cfg(target_arch = "x86_64")] - pub fn start_virtual_processor(&mut self, vp_index: u32, target_vtl : Vtl, vp_context : Option) -> Result<(), hvdef::HvError> { + pub fn start_virtual_processor( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { let header = hvdef::hypercall::StartVirtualProcessorX64 { partition_id: hvdef::HV_PARTITION_ID_SELF, - vp_index: vp_index, + vp_index, target_vtl: target_vtl.into(), vp_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), rsvd0: 0u8, rsvd1: 0u16, }; - header.write_to_prefix(self.input_page().buffer.as_mut_slice()).expect("size of start_virtual_processor header is not correct"); + header + 
.write_to_prefix(self.input_page().buffer.as_mut_slice()) + .expect("size of start_virtual_processor header is not correct"); let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None); match output.result() { @@ -516,11 +600,14 @@ impl HvCall { } } - pub fn enable_partition_vtl(&mut self, partition_id: u64, target_vtl : Vtl) -> Result<(), hvdef::HvError> { - let flags: EnablePartitionVtlFlags = - EnablePartitionVtlFlags::new() - .with_enable_mbec(false) - .with_enable_supervisor_shadow_stack(false); + pub fn enable_partition_vtl( + &mut self, + partition_id: u64, + target_vtl: Vtl, + ) -> Result<(), hvdef::HvError> { + let flags: EnablePartitionVtlFlags = EnablePartitionVtlFlags::new() + .with_enable_mbec(false) + .with_enable_supervisor_shadow_stack(false); let header = hvdef::hypercall::EnablePartitionVtl { partition_id, @@ -589,7 +676,7 @@ impl HvCall { let remaining_pages = range.end_4k_gpn() - current_page; let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); - header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); let output = self.dispatch_hvcall( hvdef::HypercallCode::HvCallAcceptGpaPages, @@ -625,8 +712,8 @@ impl HvCall { const MAX_PER_CALL: usize = 512; for hw_ids in hw_ids.chunks(MAX_PER_CALL) { - header.write_to_prefix(self.input_page().buffer.as_mut_slice()); - hw_ids.write_to_prefix(&mut self.input_page().buffer[header.as_bytes().len()..]); + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = hw_ids.write_to_prefix(&mut self.input_page().buffer[header.as_bytes().len()..]); // SAFETY: The input header and rep slice are the correct types for this hypercall. // The hypercall output is validated right after the hypercall is issued. @@ -659,4 +746,4 @@ pub type HwId = u32; /// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is the /// MPIDR on ARM64. 
#[cfg(target_arch = "aarch64")] -pub type HwId = u64; \ No newline at end of file +pub type HwId = u64; diff --git a/opentmk/src/uefi/hypvctx.rs b/opentmk/src/uefi/hypvctx.rs index 79e74cc057..86f24eee96 100644 --- a/opentmk/src/uefi/hypvctx.rs +++ b/opentmk/src/uefi/hypvctx.rs @@ -3,21 +3,16 @@ use super::{ hypercall::HvCall, }; use crate::uefi::alloc::ALLOCATOR; -use crate::{debuglog, slog::AssertResult}; -use crate::{ - infolog, - slog::AssertOption, - sync::{Channel, Receiver, Sender}, -}; +use crate::tmk_assert::AssertResult; +use crate::tmk_assert::AssertOption; use alloc::collections::btree_map::BTreeMap; use alloc::collections::linked_list::LinkedList; use alloc::{boxed::Box, vec::Vec}; use core::alloc::{GlobalAlloc, Layout}; use core::arch::asm; use core::ops::Range; -use core::sync::atomic::{AtomicBool, Ordering}; use hvdef::hypercall::{HvInputVtl, InitialVpContextX64}; -use hvdef::{HvAllArchRegisterName, HvRegisterName, Vtl}; +use hvdef::Vtl; use memory_range::MemoryRange; use minimal_rt::arch::msr::{read_msr, write_msr}; use spin::Mutex; @@ -28,34 +23,26 @@ type ComandTable = BTreeMap, Vtl)>>; static mut CMD: Mutex = Mutex::new(BTreeMap::new()); +#[allow(static_mut_refs)] fn cmdt() -> &'static Mutex { unsafe { &CMD } } -struct VpContext { - #[cfg(target_arch = "x86_64")] - ctx: InitialVpContextX64, - #[cfg(target_arch = "aarch64")] - ctx: InitialVpContextAarch64, -} - fn register_command_queue(vp_index: u32) { - unsafe { - debuglog!("registering command queue for vp: {}", vp_index); - if CMD.lock().get(&vp_index).is_none() { - CMD.lock().insert(vp_index, LinkedList::new()); - debuglog!("registered command queue for vp: {}", vp_index); + log::debug!("registering command queue for vp: {}", vp_index); + if cmdt().lock().get(&vp_index).is_none() { + cmdt().lock().insert(vp_index, LinkedList::new()); + log::debug!("registered command queue for vp: {}", vp_index); } else { - debuglog!("command queue already registered for vp: {}", vp_index); + log::debug!("command queue already registered for vp: {}", vp_index); } - } } pub struct HvTestCtx { pub hvcall: HvCall, pub vp_runing: Vec<(u32, (bool, bool))>, pub my_vp_idx: u32, - senders: Vec<(u64, Sender<(Box, Vtl)>)>, + pub my_vtl: Vtl, } impl Drop for HvTestCtx { @@ -138,8 +125,8 @@ impl TestCtxTrait for HvTestCtx { } let is_vp_running = self.vp_runing.iter_mut().find(|x| x.0 == vp_index); - if let Some(running_vtl) = is_vp_running { - debuglog!("both vtl0 and vtl1 are running for VP: {:?}", vp_index); + if let Some(_running_vtl) = is_vp_running { + log::debug!("both vtl0 and vtl1 are running for VP: {:?}", vp_index); } else { if vp_index == 0 { let vp_context = self @@ -158,7 +145,6 @@ impl TestCtxTrait for HvTestCtx { self.switch_to_high_vtl(); self.vp_runing.push((vp_index, (true, true))); } else { - let my_idx = self.my_vp_idx; cmdt().lock().get_mut(&self.my_vp_idx).unwrap().push_back(( Box::new(move |ctx| { ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1); @@ -186,7 +172,7 @@ impl TestCtxTrait for HvTestCtx { .get_mut(&vp_index) .unwrap() .push_back((cmd, vtl)); - if vp_index == self.my_vp_idx && self.hvcall.vtl != vtl { + if vp_index == self.my_vp_idx && self.my_vtl != vtl { if vtl == Vtl::Vtl0 { self.switch_to_low_vtl(); } else { @@ -218,7 +204,7 @@ impl TestCtxTrait for HvTestCtx { self.hvcall .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl) .expect_assert("Failed to enable VTL1 for the partition"); - infolog!("enabled vtl protections for the partition."); + log::info!("enabled vtl protections for the 
partition."); } fn setup_interrupt_handler(&mut self) { crate::arch::interrupt::init(); @@ -226,10 +212,10 @@ impl TestCtxTrait for HvTestCtx { fn setup_vtl_protection(&mut self) { self.hvcall - .enable_vtl_protection(0, HvInputVtl::CURRENT_VTL) + .enable_vtl_protection(HvInputVtl::CURRENT_VTL) .expect_assert("Failed to enable VTL protection, vtl1"); - infolog!("enabled vtl protections for the partition."); + log::info!("enabled vtl protections for the partition."); } fn setup_secure_intercept(&mut self, interrupt_idx: u8) { @@ -241,7 +227,7 @@ impl TestCtxTrait for HvTestCtx { let reg = (gpn << 12) | 0x1; unsafe { write_msr(hvdef::HV_X64_MSR_SIMP, reg.into()) }; - infolog!("Successfuly set the SIMP register."); + log::info!("Successfuly set the SIMP register."); let reg = unsafe { read_msr(hvdef::HV_X64_MSR_SINT0) }; let mut reg: hvdef::HvSynicSint = reg.into(); @@ -250,7 +236,7 @@ impl TestCtxTrait for HvTestCtx { reg.set_auto_eoi(true); self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into()); - infolog!("Successfuly set the SINT0 register."); + log::info!("Successfuly set the SINT0 register."); } fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) { @@ -268,7 +254,7 @@ impl TestCtxTrait for HvTestCtx { } fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) { - let (vp_index, vtl, cmd) = cmd.get(); + let (vp_index, vtl, _cmd) = cmd.get(); let vp_ctx = self .get_default_context() .expect_assert("error: failed to get default context"); @@ -282,7 +268,6 @@ impl TestCtxTrait for HvTestCtx { Vtl::Vtl0 => 0, Vtl::Vtl1 => 1, Vtl::Vtl2 => 2, - _ => panic!("error: invalid vtl"), }; let vp_context = self .get_default_context() @@ -310,19 +295,19 @@ impl TestCtxTrait for HvTestCtx { } #[cfg(target_arch = "x86_64")] - fn set_interupt_idx(&mut self, interrupt_idx: u8, handler: fn()) { + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) { crate::arch::interrupt::set_handler(interrupt_idx, handler); } + #[cfg(target_arch = "x86_64")] fn get_vp_count(&self) -> u32 { - let mut result: u32 = 0; - + let mut result: u32; unsafe { // Call CPUID with EAX=1, but work around the rbx constraint asm!( "push rbx", // Save rbx "cpuid", // Execute CPUID - "mov {result}, rbx", // Store ebx to our result variable + "mov {result:r}, rbx", // Store ebx to our result variable "pop rbx", // Restore rbx in("eax") 1u32, // Input: CPUID leaf 1 out("ecx") _, // Output registers (not used) @@ -363,7 +348,7 @@ impl TestCtxTrait for HvTestCtx { } fn get_current_vtl(&self) -> Vtl { - self.hvcall.vtl + self.my_vtl } } @@ -373,7 +358,7 @@ impl HvTestCtx { hvcall: HvCall::new(), vp_runing: Vec::new(), my_vp_idx: 0, - senders: Vec::new(), + my_vtl: Vtl::Vtl0, } } @@ -383,6 +368,7 @@ impl HvTestCtx { for i in 0..vp_count { register_command_queue(i); } + self.my_vtl = self.hvcall.vtl(); } fn exec_handler() { @@ -400,14 +386,14 @@ impl HvTestCtx { let mut cmd: Option> = None; { - let mut d = unsafe { CMD.lock() }; - let mut d = d.get_mut(&ctx.my_vp_idx); + let mut cmdt = cmdt().lock(); + let d = cmdt.get_mut(&ctx.my_vp_idx); if d.is_some() { - let mut d = d.unwrap(); + let d = d.unwrap(); if !d.is_empty() { - let (c, v) = d.front().unwrap(); - if *v == ctx.hvcall.vtl { - let (c, v) = d.pop_front().unwrap(); + let (_c, v) = d.front().unwrap(); + if *v == ctx.my_vtl { + let (c, _v) = d.pop_front().unwrap(); cmd = Some(c); } else { vtl = Some(*v); @@ -417,7 +403,7 @@ impl HvTestCtx { } if let Some(vtl) = vtl { - if (vtl == Vtl::Vtl0) { + if vtl == Vtl::Vtl0 { ctx.switch_to_low_vtl(); } else { 
ctx.switch_to_high_vtl(); @@ -455,11 +441,6 @@ impl HvTestCtx { let fn_address = fn_ptr as u64; vp_context.rip = fn_address; vp_context.rsp = stack_top; - // print stack range - let stack_range = Range { - start: x as u64, - end: x as u64 + sz as u64, - }; Ok(vp_context) } } diff --git a/opentmk/src/uefi/init.rs b/opentmk/src/uefi/init.rs index 1f0535d479..aa2eac3402 100644 --- a/opentmk/src/uefi/init.rs +++ b/opentmk/src/uefi/init.rs @@ -1,21 +1,19 @@ -use core::alloc::{GlobalAlloc, Layout}; +use uefi::{boot::{exit_boot_services, MemoryType}, guid, CStr16, Status}; -use uefi::{boot::{exit_boot_services, MemoryType}, guid, println, CStr16, Status}; - -use crate::infolog; - -use super::{alloc::ALLOCATOR}; +use super::alloc::ALLOCATOR; +const EFI_GUID: uefi::Guid = guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91"); +const OS_LOADER_INDICATIONS: &'static str = "OsLoaderIndications"; fn enable_uefi_vtl_protection() { let mut buf = vec![0u8; 1024]; let mut str_buff = vec![0u16; 1024]; let os_loader_indications_key = - CStr16::from_str_with_buf(&"OsLoaderIndications", str_buff.as_mut_slice()).unwrap(); + CStr16::from_str_with_buf(OS_LOADER_INDICATIONS, str_buff.as_mut_slice()).unwrap(); let os_loader_indications_result = uefi::runtime::get_variable( os_loader_indications_key, - &uefi::runtime::VariableVendor(guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91")), + &uefi::runtime::VariableVendor(EFI_GUID), buf.as_mut(), ) .expect("Failed to get OsLoaderIndications"); @@ -31,27 +29,28 @@ fn enable_uefi_vtl_protection() { let _ = uefi::runtime::set_variable( os_loader_indications_key, - &uefi::runtime::VariableVendor(guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91")), + &uefi::runtime::VariableVendor(EFI_GUID), os_loader_indications_result.1, &os_loader_indications, ) .expect("Failed to set OsLoaderIndications"); - let os_loader_indications_result = uefi::runtime::get_variable( + let _os_loader_indications_result = uefi::runtime::get_variable( os_loader_indications_key, - &uefi::runtime::VariableVendor(guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91")), + &uefi::runtime::VariableVendor(EFI_GUID), buf.as_mut(), ) .expect("Failed to get OsLoaderIndications"); - let _ = unsafe { exit_boot_services(MemoryType::BOOT_SERVICES_DATA) }; + let _memory_map = unsafe { exit_boot_services(MemoryType::BOOT_SERVICES_DATA) }; } pub fn init() -> Result<(), Status> { - let r: bool = unsafe { ALLOCATOR.init(2048) }; + let r: bool = ALLOCATOR.init(2048); if r == false { return Err(Status::ABORTED); } + crate::tmk_logger::init().expect("Failed to init logger"); enable_uefi_vtl_protection(); Ok(()) } \ No newline at end of file diff --git a/opentmk/src/uefi/mod.rs b/opentmk/src/uefi/mod.rs index 08cadaa5b5..ed2b2e0001 100644 --- a/opentmk/src/uefi/mod.rs +++ b/opentmk/src/uefi/mod.rs @@ -9,21 +9,7 @@ pub mod init; mod rt; mod tests; -use crate::slog::{AssertOption, AssertResult}; -use crate::sync::{Channel, Receiver, Sender}; -use crate::uefi::alloc::ALLOCATOR; -use crate::{infolog, tmk_assert}; -use ::alloc::boxed::Box; -use ::alloc::vec::Vec; -use alloc::SIZE_1MB; -use context::{TestCtxTrait, VpExecutor}; -use core::alloc::{GlobalAlloc, Layout}; -use core::cell::RefCell; -use core::ops::Range; -use core::sync::atomic::{AtomicI32, Ordering}; -use hvdef::hypercall::HvInputVtl; -use hvdef::Vtl; -use hypvctx::HvTestCtx; +use crate::tmk_assert::AssertResult; use init::init; use uefi::entry; use uefi::Status; diff --git a/opentmk/src/uefi/rt.rs b/opentmk/src/uefi/rt.rs index 1623e2f960..d8a80b4b53 100644 --- a/opentmk/src/uefi/rt.rs +++ 
b/opentmk/src/uefi/rt.rs @@ -5,17 +5,11 @@ #![cfg(target_os = "uefi")] // UNSAFETY: Raw assembly needed for panic handling to abort. -#![expect(unsafe_code)] - -use crate::arch::serial::{InstrIoAccess, Serial}; -use crate::slog; -use crate::sync::Mutex; use core::arch::asm; -use core::fmt::Write; #[panic_handler] fn panic_handler(panic: &core::panic::PanicInfo<'_>) -> ! { - crate::errorlog!("Panic at runtime: {}", panic); + log::error!("Panic at runtime: {}", panic); unsafe { asm!("int 8H"); } diff --git a/opentmk/src/uefi/tests/hv_misc.rs b/opentmk/src/uefi/tests/hv_misc.rs index 3bda306d70..56229d0fb0 100644 --- a/opentmk/src/uefi/tests/hv_misc.rs +++ b/opentmk/src/uefi/tests/hv_misc.rs @@ -1,12 +1,13 @@ +#![allow(warnings)] // WIP : This test is not yet complete and is not expected to pass. // // This test is to verify that the VTL protections are working as expected. // The stack values in VTL0 are changing after interrupt handling in VTL1. -use crate::slog::{AssertOption, AssertResult}; +use crate::tmk_assert::{AssertOption, AssertResult}; use crate::sync::{Channel, Receiver, Sender}; use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB}; use crate::uefi::{context, hypvctx}; -use crate::{infolog, tmk_assert}; +use crate::{tmk_assert}; use ::alloc::boxed::Box; use alloc::sync::Arc; use ::alloc::vec::Vec; @@ -26,23 +27,23 @@ static mut HEAPX: RefCell<*mut u8> = RefCell::new(0 as *mut u8); static mut CON: AtomicI32 = AtomicI32::new(0); pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { - infolog!("ctx ptr: {:p}", &ctx as *const _); + log::info!("ctx ptr: {:p}", &ctx as *const _); let mut vp_count = ctx.get_vp_count(); tmk_assert!(vp_count == 8, "vp count should be 8"); ctx.setup_interrupt_handler(); - infolog!("set intercept handler successfully!"); + log::info!("set intercept handler successfully!"); ctx.setup_partition_vtl(Vtl::Vtl1); ctx.start_on_vp( VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut dyn TestCtxTrait| { - infolog!("successfully started running VTL1 on vp0."); + log::info!("successfully started running VTL1 on vp0."); ctx.setup_secure_intercept(0x30); - ctx.set_interupt_idx(0x30, || { - infolog!("interrupt fired!"); + ctx.set_interrupt_idx(0x30, || { + log::info!("interrupt fired!"); let mut hv_test_ctx = HvTestCtx::new(); hv_test_ctx.init(); @@ -51,15 +52,15 @@ pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { let cp = HvRegisterVsmVpStatus::from_bits(c as u64); - infolog!("VSM VP Status: {:?}", cp); + log::info!("VSM VP Status: {:?}", cp); - infolog!("interrupt handled!"); + log::info!("interrupt handled!"); }); let layout = Layout::from_size_align(SIZE_1MB, 4096).expect("msg: failed to create layout"); let ptr = unsafe { ALLOCATOR.alloc(layout) }; - infolog!("allocated some memory in the heap from vtl1"); + log::info!("allocated some memory in the heap from vtl1"); unsafe { let mut z = HEAPX.borrow_mut(); *z = ptr; @@ -69,7 +70,7 @@ pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { let size = layout.size(); ctx.setup_vtl_protection(); - infolog!("enabled vtl protections for the partition."); + log::info!("enabled vtl protections for the partition."); let range = Range { start: ptr as u64, @@ -78,26 +79,26 @@ pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); - infolog!("moving to vtl0 to attempt to read the heap memory"); + log::info!("moving to vtl0 to attempt to read the heap memory"); ctx.switch_to_low_vtl(); }), ); ctx.queue_command_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx| { - infolog!("successfully started running VTL1 on 
vp0."); + log::info!("successfully started running VTL1 on vp0."); ctx.switch_to_low_vtl(); })); - infolog!("ctx ptr: {:p}", &ctx as *const _); + log::info!("ctx ptr: {:p}", &ctx as *const _); let mut l = 0u64; unsafe { asm!("mov {}, rsp", out(reg) l) }; - infolog!("rsp: 0x{:x}", l); + log::info!("rsp: 0x{:x}", l); unsafe { - infolog!("Attempting to read heap memory from vtl0"); + log::info!("Attempting to read heap memory from vtl0"); let heapx = *HEAPX.borrow(); let val = *(heapx.add(10)); - infolog!( + log::info!( "reading mutated heap memory from vtl0(it should not be 0xAA): 0x{:x}", val ); @@ -108,27 +109,27 @@ pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { } unsafe { asm!("mov {}, rsp", out(reg) l) }; - infolog!("rsp: 0x{:x}", l); + log::info!("rsp: 0x{:x}", l); // let (mut tx, mut rx) = Channel::new(1); // { // let mut tx = tx.clone(); // ctx.start_on_vp(VpExecutor::new(2, Vtl::Vtl0).command( // move |ctx: &mut dyn TestCtxTrait| { - // infolog!("Hello form vtl0 on vp2!"); + // log::info!("Hello form vtl0 on vp2!"); // tx.send(()); // }, // )); // } - infolog!("ctx ptr: {:p}", &ctx as *const _); + log::info!("ctx ptr: {:p}", &ctx as *const _); let c = ctx.get_vp_count(); tmk_assert!(c == 8, "vp count should be 8"); // rx.recv(); - infolog!("we are in vtl0 now!"); - infolog!("we reached the end of the test"); + log::info!("we are in vtl0 now!"); + log::info!("we reached the end of the test"); loop { } diff --git a/opentmk/src/uefi/tests/hv_processor.rs b/opentmk/src/uefi/tests/hv_processor.rs index 6839669694..6547087202 100644 --- a/opentmk/src/uefi/tests/hv_processor.rs +++ b/opentmk/src/uefi/tests/hv_processor.rs @@ -1,8 +1,7 @@ -use alloc::vec::Vec; use hvdef::Vtl; use crate::{ - criticallog, infolog, sync::{self, Mutex}, tmk_assert, uefi::context::{TestCtxTrait, VpExecutor} + tmk_assert, uefi::context::{TestCtxTrait, VpExecutor} }; pub fn exec(ctx: &mut dyn TestCtxTrait) { @@ -14,60 +13,60 @@ pub fn exec(ctx: &mut dyn TestCtxTrait) { // Testing BSP VTL Bringup { - let (mut tx, mut rx) = crate::sync::Channel::new().split(); + let (tx, rx) = crate::sync::Channel::new().split(); ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command( move |ctx: &mut dyn TestCtxTrait| { let vp = ctx.get_current_vp(); - infolog!("vp: {}", vp); + log::info!("vp: {}", vp); tmk_assert!(vp == 0, "vp should be equal to 0"); let vtl = ctx.get_current_vtl(); - infolog!("vtl: {:?}", vtl); + log::info!("vtl: {:?}", vtl); tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); - tx.send(()); + _ = tx.send(()); ctx.switch_to_low_vtl(); }, )); - rx.recv(); + _ = rx.recv(); } for i in 1..vp_count { // Testing VTL1 { - let (mut tx, mut rx) = crate::sync::Channel::new().split(); + let (tx, rx) = crate::sync::Channel::new().split(); ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command( move |ctx: &mut dyn TestCtxTrait| { let vp = ctx.get_current_vp(); - infolog!("vp: {}", vp); + log::info!("vp: {}", vp); tmk_assert!(vp == i, format!("vp should be equal to {}", i)); let vtl = ctx.get_current_vtl(); - infolog!("vtl: {:?}", vtl); - tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl0 for VP {}", i)); - tx.send(()); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl1 for VP {}", i)); + _ = tx.send(()); }, )); - rx.clone().recv(); + _ = rx.recv(); } // Testing VTL0 { - let (mut tx, mut rx) = crate::sync::Channel::new().split(); + let (tx, rx) = crate::sync::Channel::new().split(); ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command( move |ctx: &mut dyn TestCtxTrait| { let 
vp = ctx.get_current_vp(); - infolog!("vp: {}", vp); + log::info!("vp: {}", vp); tmk_assert!(vp == i, format!("vp should be equal to {}", i)); let vtl = ctx.get_current_vtl(); - infolog!("vtl: {:?}", vtl); + log::info!("vtl: {:?}", vtl); tmk_assert!(vtl == Vtl::Vtl0, format!("vtl should be Vtl0 for VP {}", i)); - tx.send(()); + _ = tx.send(()); }, )); - rx.clone().recv(); + _ = rx.recv(); } } - criticallog!("All VPs have been tested"); + log::warn!("All VPs have been tested"); } diff --git a/opentmk/src/uefi/tests/mod.rs b/opentmk/src/uefi/tests/mod.rs index 9f5a7be616..665873ac3d 100644 --- a/opentmk/src/uefi/tests/mod.rs +++ b/opentmk/src/uefi/tests/mod.rs @@ -1,5 +1,3 @@ -use alloc::sync::Arc; - use super::hypvctx::HvTestCtx; pub mod hv_processor; From 53e0da2eef29fc159c969771d57b4058958e6a6c Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Mon, 12 May 2025 05:50:20 +0000 Subject: [PATCH 06/10] chore: resolve PR feedback --- Cargo.toml | 12 +++++++-- opentmk/{ => opentmk}/Cargo.toml | 26 +++++++++---------- opentmk/{ => opentmk}/README.md | 2 +- opentmk/{ => opentmk}/build_deploy.sh | 0 .../src/arch/aarch64/hypercall.rs | 0 opentmk/{ => opentmk}/src/arch/aarch64/mod.rs | 0 opentmk/{ => opentmk}/src/arch/mod.rs | 0 .../src/arch/x86_64/hypercall.rs | 0 .../src/arch/x86_64/interrupt.rs | 2 +- .../arch/x86_64/interrupt_handler_register.rs | 2 +- opentmk/{ => opentmk}/src/arch/x86_64/mod.rs | 0 .../{ => opentmk}/src/arch/x86_64/serial.rs | 2 +- opentmk/{ => opentmk}/src/main.rs | 6 ----- opentmk/{ => opentmk}/src/tests/hv_misc.rs | 0 .../{ => opentmk}/src/tests/hv_processor.rs | 0 opentmk/{ => opentmk}/src/tests/mod.rs | 0 opentmk/{ => opentmk}/src/tmk_assert.rs | 10 +++---- opentmk/{ => opentmk}/src/tmk_logger.rs | 2 +- opentmk/{ => opentmk}/src/uefi/alloc.rs | 2 +- opentmk/{ => opentmk}/src/uefi/context.rs | 0 opentmk/{ => opentmk}/src/uefi/hypercall.rs | 0 opentmk/{ => opentmk}/src/uefi/hypvctx.rs | 2 +- opentmk/{ => opentmk}/src/uefi/init.rs | 0 opentmk/{ => opentmk}/src/uefi/mod.rs | 0 opentmk/{ => opentmk}/src/uefi/rt.rs | 0 .../{ => opentmk}/src/uefi/tests/hv_misc.rs | 2 +- .../src/uefi/tests/hv_processor.rs | 7 ++--- opentmk/{ => opentmk}/src/uefi/tests/mod.rs | 0 opentmk/sync/Cargo.toml | 12 +++++++++ opentmk/{src/sync.rs => sync/src/lib.rs} | 5 +++- 30 files changed, 56 insertions(+), 38 deletions(-) rename opentmk/{ => opentmk}/Cargo.toml (62%) rename opentmk/{ => opentmk}/README.md (78%) rename opentmk/{ => opentmk}/build_deploy.sh (100%) rename opentmk/{ => opentmk}/src/arch/aarch64/hypercall.rs (100%) rename opentmk/{ => opentmk}/src/arch/aarch64/mod.rs (100%) rename opentmk/{ => opentmk}/src/arch/mod.rs (100%) rename opentmk/{ => opentmk}/src/arch/x86_64/hypercall.rs (100%) rename opentmk/{ => opentmk}/src/arch/x86_64/interrupt.rs (98%) rename opentmk/{ => opentmk}/src/arch/x86_64/interrupt_handler_register.rs (99%) rename opentmk/{ => opentmk}/src/arch/x86_64/mod.rs (100%) rename opentmk/{ => opentmk}/src/arch/x86_64/serial.rs (99%) rename opentmk/{ => opentmk}/src/main.rs (57%) rename opentmk/{ => opentmk}/src/tests/hv_misc.rs (100%) rename opentmk/{ => opentmk}/src/tests/hv_processor.rs (100%) rename opentmk/{ => opentmk}/src/tests/mod.rs (100%) rename opentmk/{ => opentmk}/src/tmk_assert.rs (88%) rename opentmk/{ => opentmk}/src/tmk_logger.rs (97%) rename opentmk/{ => opentmk}/src/uefi/alloc.rs (99%) rename opentmk/{ => opentmk}/src/uefi/context.rs (100%) rename opentmk/{ => opentmk}/src/uefi/hypercall.rs (100%) rename opentmk/{ => opentmk}/src/uefi/hypvctx.rs (99%) 
rename opentmk/{ => opentmk}/src/uefi/init.rs (100%) rename opentmk/{ => opentmk}/src/uefi/mod.rs (100%) rename opentmk/{ => opentmk}/src/uefi/rt.rs (100%) rename opentmk/{ => opentmk}/src/uefi/tests/hv_misc.rs (98%) rename opentmk/{ => opentmk}/src/uefi/tests/hv_processor.rs (92%) rename opentmk/{ => opentmk}/src/uefi/tests/mod.rs (100%) create mode 100644 opentmk/sync/Cargo.toml rename opentmk/{src/sync.rs => sync/src/lib.rs} (99%) diff --git a/Cargo.toml b/Cargo.toml index fd2106e652..ed1823f711 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,9 @@ members = [ "vm/loader/igvmfilegen", "vm/vmgs/vmgs_lib", "vm/vmgs/vmgstool", - "opentmk" + # opentmk + "opentmk/opentmk", + "opentmk/sync" ] exclude = [ "xsync", @@ -375,6 +377,9 @@ vnc_worker_defs = { path = "workers/vnc_worker_defs" } vnc = { path = "workers/vnc_worker/vnc" } profiler_worker = { path = "openhcl/profiler_worker" } +# opentmk +sync_nostd = { path = "opentmk/sync"} + # crates.io anyhow = "1.0" arbitrary = "1.3" @@ -446,9 +451,11 @@ jiff = "0.1" kvm-bindings = "0.7" # Use of these specific REPO will go away when changes are taken upstream. landlock = "0.3.1" +lazy_static = { version = "1.4.0", features = ["spin_no_std"] } libc = "0.2" libfuzzer-sys = "0.4" libtest-mimic = "0.8" +linked_list_allocator = "0.10.5" linkme = "0.3.9" log = "0.4" macaddr = "1.0" @@ -494,6 +501,7 @@ smallbox = "0.8" smallvec = "1.8" smoltcp = { version = "0.8", default-features = false } socket2 = "0.5" +spin = "0.10.0" stackfuture = "0.3" static_assertions = "1.1" syn = "2" @@ -521,12 +529,12 @@ winapi = "0.3" windows = "0.59" windows-service = "0.7" windows-sys = "0.52" +x86_64 = "0.15.2" xshell = "=0.2.2" # pin to 0.2.2 to work around https://github.com/matklad/xshell/issues/63 xshell-macros = "0.2" # We add the derive feature here since the vast majority of our crates use it. 
#zerocopy = { version = "0.7.32", features = ["derive"]} zerocopy = { version = "0.8.14", features = ["derive"]} -linked_list_allocator = "0.10.5" [workspace.metadata.xtask.unused-deps] # Pulled in through "tracing", but we need to pin the version diff --git a/opentmk/Cargo.toml b/opentmk/opentmk/Cargo.toml similarity index 62% rename from opentmk/Cargo.toml rename to opentmk/opentmk/Cargo.toml index 70f4c701c4..27eeee0c6c 100644 --- a/opentmk/Cargo.toml +++ b/opentmk/opentmk/Cargo.toml @@ -7,22 +7,22 @@ edition.workspace = true rust-version.workspace = true [dependencies] -uefi = { workspace = true, features = ["alloc"] } -minimal_rt.workspace = true -linked_list_allocator = { workspace = true } -hvdef = {workspace = true} -zerocopy = {workspace = true} -memory_range = { workspace = true } -arrayvec = {workspace = true} -cfg-if.workspace = true +arrayvec.workspace = true bitfield-struct.workspace = true -x86_64 = "0.15.2" -lazy_static = { version = "1.4.0", features = ["spin_no_std"] } +cfg-if.workspace = true +hvdef = {workspace = true} +lazy_static.workspace = true +linked_list_allocator.workspace = true +log.workspace = true +memory_range.workspace = true +minimal_rt.workspace = true +serde = { version = "1.0", default-features = false} serde_json = { version = "1.0", default-features = false, features = ["alloc"] } -spin = "0.10.0" -serde = {version = "1.0", default-features = false} -log = { version = "0.4", features = ["serde"] } +uefi = { workspace = true, features = ["alloc"] } +x86_64.workspace = true x86defs.workspace = true +zerocopy.workspace = true +sync_nostd.workspace = true [lints] workspace = true diff --git a/opentmk/README.md b/opentmk/opentmk/README.md similarity index 78% rename from opentmk/README.md rename to opentmk/opentmk/README.md index 999308fc0b..a2658e8753 100644 --- a/opentmk/README.md +++ b/opentmk/opentmk/README.md @@ -1,3 +1,3 @@ -# `guest_test_uefi` +# OpenTMK See the guide for more info on how to build/run the code in this crate. 
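To show how the relocated opentmk crate is meant to be consumed, here is a minimal test sketch; it is illustrative only, mirrors the pattern already used under src/uefi/tests, and assumes the TestCtxTrait and tmk_assert! items introduced earlier in this series (the function name is hypothetical).

use crate::{tmk_assert, uefi::context::TestCtxTrait};

pub fn exec_sketch(ctx: &mut dyn TestCtxTrait) {
    // Illustrative only: query the partition and assert on the result, the
    // same shape as the hv_processor test in this crate.
    let vp_count = ctx.get_vp_count();
    log::info!("vp count: {}", vp_count);
    tmk_assert!(vp_count >= 1, "partition should expose at least one VP");
}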
diff --git a/opentmk/build_deploy.sh b/opentmk/opentmk/build_deploy.sh similarity index 100% rename from opentmk/build_deploy.sh rename to opentmk/opentmk/build_deploy.sh diff --git a/opentmk/src/arch/aarch64/hypercall.rs b/opentmk/opentmk/src/arch/aarch64/hypercall.rs similarity index 100% rename from opentmk/src/arch/aarch64/hypercall.rs rename to opentmk/opentmk/src/arch/aarch64/hypercall.rs diff --git a/opentmk/src/arch/aarch64/mod.rs b/opentmk/opentmk/src/arch/aarch64/mod.rs similarity index 100% rename from opentmk/src/arch/aarch64/mod.rs rename to opentmk/opentmk/src/arch/aarch64/mod.rs diff --git a/opentmk/src/arch/mod.rs b/opentmk/opentmk/src/arch/mod.rs similarity index 100% rename from opentmk/src/arch/mod.rs rename to opentmk/opentmk/src/arch/mod.rs diff --git a/opentmk/src/arch/x86_64/hypercall.rs b/opentmk/opentmk/src/arch/x86_64/hypercall.rs similarity index 100% rename from opentmk/src/arch/x86_64/hypercall.rs rename to opentmk/opentmk/src/arch/x86_64/hypercall.rs diff --git a/opentmk/src/arch/x86_64/interrupt.rs b/opentmk/opentmk/src/arch/x86_64/interrupt.rs similarity index 98% rename from opentmk/src/arch/x86_64/interrupt.rs rename to opentmk/opentmk/src/arch/x86_64/interrupt.rs index 952629be30..1f31036de8 100644 --- a/opentmk/src/arch/x86_64/interrupt.rs +++ b/opentmk/opentmk/src/arch/x86_64/interrupt.rs @@ -1,6 +1,6 @@ use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; use lazy_static::lazy_static; -use crate::sync::Mutex; +use sync_nostd::Mutex; use super::interrupt_handler_register::{register_interrupt_handler, set_common_handler}; diff --git a/opentmk/src/arch/x86_64/interrupt_handler_register.rs b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs similarity index 99% rename from opentmk/src/arch/x86_64/interrupt_handler_register.rs rename to opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs index 25522d3c38..596be70cd0 100644 --- a/opentmk/src/arch/x86_64/interrupt_handler_register.rs +++ b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs @@ -1,7 +1,7 @@ #![allow(dead_code)] use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; -use crate::sync::Mutex; +use sync_nostd::Mutex; static mut COMMON_HANDLER: fn(InterruptStackFrame, u8) = common_handler; static COMMON_HANDLER_MUTEX: Mutex<()> = Mutex::new(()); diff --git a/opentmk/src/arch/x86_64/mod.rs b/opentmk/opentmk/src/arch/x86_64/mod.rs similarity index 100% rename from opentmk/src/arch/x86_64/mod.rs rename to opentmk/opentmk/src/arch/x86_64/mod.rs diff --git a/opentmk/src/arch/x86_64/serial.rs b/opentmk/opentmk/src/arch/x86_64/serial.rs similarity index 99% rename from opentmk/src/arch/x86_64/serial.rs rename to opentmk/opentmk/src/arch/x86_64/serial.rs index 173794ad8f..bb16808b60 100644 --- a/opentmk/src/arch/x86_64/serial.rs +++ b/opentmk/opentmk/src/arch/x86_64/serial.rs @@ -5,7 +5,7 @@ #![allow(static_mut_refs)] use core::arch::asm; use core::fmt; -use crate::sync::Mutex; +use sync_nostd::Mutex; const COM4: u16 = 0x2E8; static mut MUTEX : Mutex<()> = Mutex::new(()); diff --git a/opentmk/src/main.rs b/opentmk/opentmk/src/main.rs similarity index 57% rename from opentmk/src/main.rs rename to opentmk/opentmk/src/main.rs index d33c63c8af..041ae6e702 100644 --- a/opentmk/src/main.rs +++ b/opentmk/opentmk/src/main.rs @@ -5,12 +5,7 @@ #![feature(abi_x86_interrupt)] #![doc = include_str!("../README.md")] -// HACK: workaround for building guest_test_uefi as part of the workspace in CI. 
-#![cfg_attr(all(not(test), target_os = "uefi"), no_main)] -#![cfg_attr(all(not(test), target_os = "uefi"), no_std)] -// HACK: workaround for building guest_test_uefi as part of the workspace in CI -// // Actual entrypoint is `uefi::uefi_main`, via the `#[entry]` macro #[cfg(any(test, not(target_os = "uefi")))] fn main() {} @@ -21,5 +16,4 @@ extern crate alloc; mod uefi; pub mod arch; pub mod tmk_assert; -pub mod sync; pub mod tmk_logger; diff --git a/opentmk/src/tests/hv_misc.rs b/opentmk/opentmk/src/tests/hv_misc.rs similarity index 100% rename from opentmk/src/tests/hv_misc.rs rename to opentmk/opentmk/src/tests/hv_misc.rs diff --git a/opentmk/src/tests/hv_processor.rs b/opentmk/opentmk/src/tests/hv_processor.rs similarity index 100% rename from opentmk/src/tests/hv_processor.rs rename to opentmk/opentmk/src/tests/hv_processor.rs diff --git a/opentmk/src/tests/mod.rs b/opentmk/opentmk/src/tests/mod.rs similarity index 100% rename from opentmk/src/tests/mod.rs rename to opentmk/opentmk/src/tests/mod.rs diff --git a/opentmk/src/tmk_assert.rs b/opentmk/opentmk/src/tmk_assert.rs similarity index 88% rename from opentmk/src/tmk_assert.rs rename to opentmk/opentmk/src/tmk_assert.rs index a33e3ca447..dad6e46dfe 100644 --- a/opentmk/src/tmk_assert.rs +++ b/opentmk/opentmk/src/tmk_assert.rs @@ -3,7 +3,7 @@ use alloc::string::{String, ToString}; use serde::Serialize; use serde_json::json; -pub fn format_asset_json_string( +pub fn format_assert_json_string( s: &str, terminate_new_line: bool, line: String, @@ -42,7 +42,7 @@ macro_rules! tmk_assert { let file_line = format!("{}:{}", file, line); let expn = stringify!($condition); let result: bool = $condition; - let js = crate::tmk_assert::format_asset_json_string( + let js = crate::tmk_assert::format_assert_json_string( &expn, true, file_line, result, &$message, ); crate::tmk_assert::write_str(&js); @@ -70,7 +70,7 @@ impl AssertOption for Option { let call: &core::panic::Location<'_> = core::panic::Location::caller(); let file_line = format!("{}:{}", call.file(), call.line()); let expn = type_name::>(); - let js = format_asset_json_string(expn, true, file_line, false, &message); + let js = format_assert_json_string(expn, true, file_line, false, &message); write_str(&js); panic!("Assertion failed: {}", message); } @@ -90,7 +90,7 @@ where let file_line = format!("{}:{}", call.file(), call.line()); let expn = type_name::>(); let js = - format_asset_json_string(expn, true, file_line, false, &"ResultTest"); + format_assert_json_string(expn, true, file_line, false, &"ResultTest"); write_str(&js); panic!("Assertion failed: {:?}", err); } @@ -106,7 +106,7 @@ where let call: &core::panic::Location<'_> = core::panic::Location::caller(); let file_line = format!("{}:{}", call.file(), call.line()); let expn = type_name::>(); - let js = format_asset_json_string(expn, true, file_line, false, &message); + let js = format_assert_json_string(expn, true, file_line, false, &message); write_str(&js); panic!("Assertion failed: {:?}", err); } diff --git a/opentmk/src/tmk_logger.rs b/opentmk/opentmk/src/tmk_logger.rs similarity index 97% rename from opentmk/src/tmk_logger.rs rename to opentmk/opentmk/src/tmk_logger.rs index 467d775c49..f594f60b63 100644 --- a/opentmk/src/tmk_logger.rs +++ b/opentmk/opentmk/src/tmk_logger.rs @@ -3,7 +3,7 @@ use core::fmt::Write; use alloc::{fmt::format, string::{String, ToString}}; use log::SetLoggerError; use serde_json::json; -use spin::{mutex::Mutex, MutexGuard}; +use sync_nostd::{Mutex, MutexGuard}; use 
crate::arch::serial::{InstrIoAccess, Serial}; diff --git a/opentmk/src/uefi/alloc.rs b/opentmk/opentmk/src/uefi/alloc.rs similarity index 99% rename from opentmk/src/uefi/alloc.rs rename to opentmk/opentmk/src/uefi/alloc.rs index edc36d054d..f6127573f7 100644 --- a/opentmk/src/uefi/alloc.rs +++ b/opentmk/opentmk/src/uefi/alloc.rs @@ -1,7 +1,7 @@ use core::{alloc::GlobalAlloc, cell::RefCell}; use linked_list_allocator::LockedHeap; -use spin::mutex::Mutex; +use sync_nostd::Mutex; use uefi::{ allocator::Allocator, boot::{self, AllocateType, MemoryType}, diff --git a/opentmk/src/uefi/context.rs b/opentmk/opentmk/src/uefi/context.rs similarity index 100% rename from opentmk/src/uefi/context.rs rename to opentmk/opentmk/src/uefi/context.rs diff --git a/opentmk/src/uefi/hypercall.rs b/opentmk/opentmk/src/uefi/hypercall.rs similarity index 100% rename from opentmk/src/uefi/hypercall.rs rename to opentmk/opentmk/src/uefi/hypercall.rs diff --git a/opentmk/src/uefi/hypvctx.rs b/opentmk/opentmk/src/uefi/hypvctx.rs similarity index 99% rename from opentmk/src/uefi/hypvctx.rs rename to opentmk/opentmk/src/uefi/hypvctx.rs index 86f24eee96..26455dbc22 100644 --- a/opentmk/src/uefi/hypvctx.rs +++ b/opentmk/opentmk/src/uefi/hypvctx.rs @@ -15,7 +15,7 @@ use hvdef::hypercall::{HvInputVtl, InitialVpContextX64}; use hvdef::Vtl; use memory_range::MemoryRange; use minimal_rt::arch::msr::{read_msr, write_msr}; -use spin::Mutex; +use sync_nostd::Mutex; const ALIGNMENT: usize = 4096; diff --git a/opentmk/src/uefi/init.rs b/opentmk/opentmk/src/uefi/init.rs similarity index 100% rename from opentmk/src/uefi/init.rs rename to opentmk/opentmk/src/uefi/init.rs diff --git a/opentmk/src/uefi/mod.rs b/opentmk/opentmk/src/uefi/mod.rs similarity index 100% rename from opentmk/src/uefi/mod.rs rename to opentmk/opentmk/src/uefi/mod.rs diff --git a/opentmk/src/uefi/rt.rs b/opentmk/opentmk/src/uefi/rt.rs similarity index 100% rename from opentmk/src/uefi/rt.rs rename to opentmk/opentmk/src/uefi/rt.rs diff --git a/opentmk/src/uefi/tests/hv_misc.rs b/opentmk/opentmk/src/uefi/tests/hv_misc.rs similarity index 98% rename from opentmk/src/uefi/tests/hv_misc.rs rename to opentmk/opentmk/src/uefi/tests/hv_misc.rs index 56229d0fb0..58e27531d6 100644 --- a/opentmk/src/uefi/tests/hv_misc.rs +++ b/opentmk/opentmk/src/uefi/tests/hv_misc.rs @@ -4,7 +4,7 @@ // This test is to verify that the VTL protections are working as expected. // The stack values in VTL0 are changing after interrupt handling in VTL1. 
use crate::tmk_assert::{AssertOption, AssertResult}; -use crate::sync::{Channel, Receiver, Sender}; +use sync_nostd::{Channel, Receiver, Sender}; use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB}; use crate::uefi::{context, hypvctx}; use crate::{tmk_assert}; diff --git a/opentmk/src/uefi/tests/hv_processor.rs b/opentmk/opentmk/src/uefi/tests/hv_processor.rs similarity index 92% rename from opentmk/src/uefi/tests/hv_processor.rs rename to opentmk/opentmk/src/uefi/tests/hv_processor.rs index 6547087202..6dec2ae748 100644 --- a/opentmk/src/uefi/tests/hv_processor.rs +++ b/opentmk/opentmk/src/uefi/tests/hv_processor.rs @@ -1,4 +1,5 @@ use hvdef::Vtl; +use sync_nostd::Channel; use crate::{ tmk_assert, uefi::context::{TestCtxTrait, VpExecutor} @@ -13,7 +14,7 @@ pub fn exec(ctx: &mut dyn TestCtxTrait) { // Testing BSP VTL Bringup { - let (tx, rx) = crate::sync::Channel::new().split(); + let (tx, rx) = Channel::new().split(); ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command( move |ctx: &mut dyn TestCtxTrait| { let vp = ctx.get_current_vp(); @@ -33,7 +34,7 @@ pub fn exec(ctx: &mut dyn TestCtxTrait) { for i in 1..vp_count { // Testing VTL1 { - let (tx, rx) = crate::sync::Channel::new().split(); + let (tx, rx) = Channel::new().split(); ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command( move |ctx: &mut dyn TestCtxTrait| { let vp = ctx.get_current_vp(); @@ -51,7 +52,7 @@ pub fn exec(ctx: &mut dyn TestCtxTrait) { // Testing VTL0 { - let (tx, rx) = crate::sync::Channel::new().split(); + let (tx, rx) = Channel::new().split(); ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command( move |ctx: &mut dyn TestCtxTrait| { let vp = ctx.get_current_vp(); diff --git a/opentmk/src/uefi/tests/mod.rs b/opentmk/opentmk/src/uefi/tests/mod.rs similarity index 100% rename from opentmk/src/uefi/tests/mod.rs rename to opentmk/opentmk/src/uefi/tests/mod.rs diff --git a/opentmk/sync/Cargo.toml b/opentmk/sync/Cargo.toml new file mode 100644 index 0000000000..53f9ba2ad6 --- /dev/null +++ b/opentmk/sync/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "sync_nostd" +version = "0.1.0" +rust-version.workspace = true +edition.workspace = true + +[dependencies] +spin.workspace = true + + +[lints] +workspace = true diff --git a/opentmk/src/sync.rs b/opentmk/sync/src/lib.rs similarity index 99% rename from opentmk/src/sync.rs rename to opentmk/sync/src/lib.rs index d0fe9eba59..e3a387c02c 100644 --- a/opentmk/src/sync.rs +++ b/opentmk/sync/src/lib.rs @@ -1,5 +1,8 @@ +#![no_std] +#![allow(unsafe_code)] +extern crate alloc; use core::sync::atomic::{AtomicUsize, Ordering}; -pub use spin::Mutex; +pub use spin::{Mutex, MutexGuard}; use alloc::{sync::Arc, vec::Vec}; use alloc::collections::VecDeque; use core::error::Error; From 7eb38ff9383d370c0501a189c71e5cd557734d42 Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Mon, 12 May 2025 11:20:00 +0000 Subject: [PATCH 07/10] refactor: resolve PR feedback --- .../arch/x86_64/interrupt_handler_register.rs | 115 ++- opentmk/opentmk/src/{uefi => }/context.rs | 35 + opentmk/opentmk/src/{uefi => }/hypercall.rs | 781 +++++++++--------- opentmk/opentmk/src/main.rs | 5 + opentmk/opentmk/src/uefi/hypvctx.rs | 10 +- opentmk/opentmk/src/uefi/mod.rs | 2 - opentmk/opentmk/src/uefi/tests/hv_misc.rs | 2 +- .../opentmk/src/uefi/tests/hv_processor.rs | 2 +- 8 files changed, 512 insertions(+), 440 deletions(-) rename opentmk/opentmk/src/{uefi => }/context.rs (60%) rename opentmk/opentmk/src/{uefi => }/hypercall.rs (97%) diff --git a/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs 
b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs index 596be70cd0..e6084ec4ce 100644 --- a/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs +++ b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs @@ -1,5 +1,5 @@ #![allow(dead_code)] -use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame}; +use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode}; use sync_nostd::Mutex; @@ -14,6 +14,40 @@ macro_rules! create_fn { }; } +macro_rules! create_fn_create_with_errorcode { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) { + unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + } + }; +} + +macro_rules! create_fn_divergent_create_with_errorcode { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) -> ! { + unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + loop{} + } + }; +} + +macro_rules! create_fn_divergent_create { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) -> ! { + unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + loop{} + } + }; +} + +macro_rules! create_page_fault_fn { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame:InterruptStackFrame, _error_code: PageFaultErrorCode) { + unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + } + }; +} + macro_rules! register_interrupt_handler { ($idt: expr, $i: expr, $name: ident) => { $idt[$i].set_handler_fn($name); @@ -34,38 +68,33 @@ pub fn set_common_handler(handler: fn(InterruptStackFrame, u8)) { extern "x86-interrupt" fn no_op(_stack_frame: InterruptStackFrame) {} pub fn register_interrupt_handler(idt: &mut InterruptDescriptorTable) { - register_interrupt_handler!(idt, x86defs::Exception::DIVIDE_ERROR.0, handler_0); - register_interrupt_handler!(idt, x86defs::Exception::DEBUG.0, handler_1); - register_interrupt_handler!(idt, 2, handler_2); - register_interrupt_handler!(idt, x86defs::Exception::BREAKPOINT.0, handler_3); - register_interrupt_handler!(idt, x86defs::Exception::OVERFLOW.0, handler_4); - register_interrupt_handler!(idt, x86defs::Exception::BOUND_RANGE_EXCEEDED.0, handler_5); - register_interrupt_handler!(idt, x86defs::Exception::INVALID_OPCODE.0, handler_6); - register_interrupt_handler!(idt, x86defs::Exception::DEVICE_NOT_AVAILABLE.0, handler_7); - // register_interrupt_handler!(idt, x86defs::Exception::DOUBLE_FAULT.0, handler_8); + idt.divide_error.set_handler_fn(handler_0); + idt.debug.set_handler_fn(handler_1); + idt.non_maskable_interrupt.set_handler_fn(handler_2); + idt.breakpoint.set_handler_fn(handler_3); + idt.overflow.set_handler_fn(handler_4); + idt.bound_range_exceeded.set_handler_fn(handler_5); + idt.invalid_opcode.set_handler_fn(handler_6); + idt.device_not_available.set_handler_fn(handler_7); + idt.double_fault.set_handler_fn(handler_8); register_interrupt_handler!(idt, 9, handler_9); - // register_interrupt_handler!(idt, x86defs::Exception::INVALID_TSS.0, handler_10); - // register_interrupt_handler!(idt, x86defs::Exception::SEGMENT_NOT_PRESENT.0, handler_11); - // register_interrupt_handler!(idt, x86defs::Exception::STACK_SEGMENT_FAULT.0, handler_12); - // register_interrupt_handler!(idt, x86defs::Exception::GENERAL_PROTECTION_FAULT.0, handler_13); - // register_interrupt_handler!(idt, x86defs::Exception::PAGE_FAULT.0, handler_14); - // register_interrupt_handler!(idt, 15, handler_15); - // register_interrupt_handler!(idt, 
x86defs::Exception::FLOATING_POINT_EXCEPTION.0, handler_16); - // register_interrupt_handler!(idt, x86defs::Exception::ALIGNMENT_CHECK.0, handler_17); - // register_interrupt_handler!(idt, x86defs::Exception::MACHINE_CHECK.0, handler_18); - // register_interrupt_handler!(idt, x86defs::Exception::SIMD_FLOATING_POINT_EXCEPTION.0, handler_19); - // register_interrupt_handler!(idt, 20, handler_20); - // register_interrupt_handler!(idt, 21, handler_21); - // register_interrupt_handler!(idt, 22, handler_22); - // register_interrupt_handler!(idt, 23, handler_23); - // register_interrupt_handler!(idt, 24, handler_24); - // register_interrupt_handler!(idt, 25, handler_25); - // register_interrupt_handler!(idt, 26, handler_26); - // register_interrupt_handler!(idt, 27, handler_27); - // register_interrupt_handler!(idt, 28, handler_28); - // register_interrupt_handler!(idt, x86defs::Exception::SEV_VMM_COMMUNICATION.0, handler_29); - // register_interrupt_handler!(idt, 30, handler_30); - // register_interrupt_handler!(idt, 31, handler_31); + idt.invalid_tss.set_handler_fn(handler_10); + idt.segment_not_present.set_handler_fn(handler_11); + idt.stack_segment_fault.set_handler_fn(handler_12); + idt.general_protection_fault.set_handler_fn(handler_13); + idt.page_fault.set_handler_fn(handler_14); + // Vector 15 is reserved + idt.x87_floating_point.set_handler_fn(handler_16); + idt.alignment_check.set_handler_fn(handler_17); + idt.machine_check.set_handler_fn(handler_18); + idt.simd_floating_point.set_handler_fn(handler_19); + idt.virtualization.set_handler_fn(handler_20); + idt.cp_protection_exception.set_handler_fn(handler_21); + // Vector 22-27 is reserved + idt.hv_injection_exception.set_handler_fn(handler_28); + idt.vmm_communication_exception.set_handler_fn(handler_29); + idt.security_exception.set_handler_fn(handler_30); + // Vector 31 is reserved register_interrupt_handler!(idt, 32, handler_32); register_interrupt_handler!(idt, 33, handler_33); @@ -301,20 +330,20 @@ create_fn!(handler_4, 4); create_fn!(handler_5, 5); create_fn!(handler_6, 6); create_fn!(handler_7, 7); -create_fn!(handler_8, 8); +create_fn_divergent_create_with_errorcode!(handler_8, 8); create_fn!(handler_9, 9); -create_fn!(handler_10, 10); -create_fn!(handler_11, 11); -create_fn!(handler_12, 12); -create_fn!(handler_13, 13); -create_fn!(handler_14, 14); +create_fn_create_with_errorcode!(handler_10, 10); +create_fn_create_with_errorcode!(handler_11, 11); +create_fn_create_with_errorcode!(handler_12, 12); +create_fn_create_with_errorcode!(handler_13, 13); +create_page_fault_fn!(handler_14, 14); create_fn!(handler_15, 15); create_fn!(handler_16, 16); -create_fn!(handler_17, 17); -create_fn!(handler_18, 18); +create_fn_create_with_errorcode!(handler_17, 17); +create_fn_divergent_create!(handler_18, 18); create_fn!(handler_19, 19); create_fn!(handler_20, 20); -create_fn!(handler_21, 21); +create_fn_create_with_errorcode!(handler_21, 21); create_fn!(handler_22, 22); create_fn!(handler_23, 23); create_fn!(handler_24, 24); @@ -322,8 +351,8 @@ create_fn!(handler_25, 25); create_fn!(handler_26, 26); create_fn!(handler_27, 27); create_fn!(handler_28, 28); -create_fn!(handler_29, 29); -create_fn!(handler_30, 30); +create_fn_create_with_errorcode!(handler_29, 29); +create_fn_create_with_errorcode!(handler_30, 30); create_fn!(handler_31, 31); create_fn!(handler_32, 32); create_fn!(handler_33, 33); diff --git a/opentmk/opentmk/src/uefi/context.rs b/opentmk/opentmk/src/context.rs similarity index 60% rename from opentmk/opentmk/src/uefi/context.rs 
rename to opentmk/opentmk/src/context.rs index 3249c0bd3f..da52e8ee32 100644 --- a/opentmk/opentmk/src/uefi/context.rs +++ b/opentmk/opentmk/src/context.rs @@ -5,6 +5,41 @@ use alloc::boxed::Box; use hvdef::Vtl; +pub trait SecureInterceptPlatformTrait { + fn setup_secure_intercept(&mut self, interrupt_idx: u8); +} + +pub trait InterruptPlatformTrait { + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()); + fn setup_interrupt_handler(&mut self); +} + +pub trait MsrPlatformTrait { + fn read_msr(&mut self, msr: u32) -> u64; + fn write_msr(&mut self, msr: u32, value: u64); +} + +pub trait VirtualProcessorlatformTrait { + fn get_register(&mut self, reg: u32) -> u128; + fn get_vp_count(&self) -> u32; + fn queue_command_vp(&mut self, cmd: VpExecutor); + fn start_on_vp(&mut self, cmd: VpExecutor); + fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor); +} + +pub trait VtlPlatformTrait { + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl); + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl); + fn get_current_vtl(&self) -> Vtl; + fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl); + fn setup_partition_vtl(&mut self, vtl: Vtl); + fn setup_vtl_protection(&mut self); + fn switch_to_high_vtl(&mut self); + fn switch_to_low_vtl(&mut self); +} + + + pub trait TestCtxTrait { diff --git a/opentmk/opentmk/src/uefi/hypercall.rs b/opentmk/opentmk/src/hypercall.rs similarity index 97% rename from opentmk/opentmk/src/uefi/hypercall.rs rename to opentmk/opentmk/src/hypercall.rs index 26aa18db9a..9129635272 100644 --- a/opentmk/opentmk/src/uefi/hypercall.rs +++ b/opentmk/opentmk/src/hypercall.rs @@ -121,250 +121,47 @@ pub struct HvCall { #[expect(unsafe_code)] impl HvCall { - pub const fn new() -> Self { - HvCall { - initialized: false, - input_page: HvcallPage::new(), - output_page: HvcallPage::new(), - } - } - fn input_page(&mut self) -> &mut HvcallPage { - &mut self.input_page - } - - fn output_page(&mut self) -> &mut HvcallPage { - &mut self.output_page - } - - /// Returns the address of the hypercall page, mapping it first if - /// necessary. - #[cfg(target_arch = "x86_64")] - pub fn hypercall_page(&mut self) -> u64 { - self.init_if_needed(); - core::ptr::addr_of!(HYPERCALL_PAGE) as u64 - } - - fn init_if_needed(&mut self) { - if !self.initialized { - self.initialize(); - } - } - - pub fn initialize(&mut self) { - assert!(!self.initialized); - - // TODO: revisit os id value. For now, use 1 (which is what UEFI does) - let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1); - crate::arch::hypercall::initialize(guest_os_id.into()); - self.initialized = true; - } - - /// Call before jumping to kernel. - pub fn uninitialize(&mut self) { - if self.initialized { - crate::arch::hypercall::uninitialize(); - self.initialized = false; - } - } - - /// Returns the environment's VTL. - pub fn vtl(&mut self) -> Vtl { - assert!(self.initialized); - self - .get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None) - .map_or(Vtl::Vtl0, |status| { - hvdef::HvRegisterVsmVpStatus::from(status.as_u64()) - .active_vtl() - .try_into() - .unwrap() - }) - } - - /// Makes a hypercall. 
- /// rep_count is Some for rep hypercalls - fn dispatch_hvcall( - &mut self, - code: hvdef::HypercallCode, - rep_count: Option, - ) -> hvdef::hypercall::HypercallOutput { - self.init_if_needed(); - - let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() - .with_code(code.0) - .with_rep_count(rep_count.unwrap_or_default()); - - // SAFETY: Invoking hypercall per TLFS spec - unsafe { - invoke_hypercall( - control, - self.input_page().address(), - self.output_page().address(), - ) - } - } - - pub fn set_vp_registers( + /// Hypercall to accept vtl2 pages from address start to end with VTL 2 + /// protections and no host visibility + #[cfg_attr(target_arch = "aarch64", allow(dead_code))] + pub fn accept_vtl2_pages( &mut self, - vp: u32, - vtl: Option, - vp_context: Option, + range: MemoryRange, + memory_type: hvdef::hypercall::AcceptMemoryType, ) -> Result<(), hvdef::HvError> { - const HEADER_SIZE: usize = size_of::(); - - let header = hvdef::hypercall::GetSetVpRegisters { - partition_id: hvdef::HV_PARTITION_ID_SELF, - vp_index: vp, - target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), - rsvd: [0; 3], - }; - - let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); - - let mut input_offset = HEADER_SIZE; + const HEADER_SIZE: usize = size_of::(); + const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); - let mut count = 0; - let mut write_reg = |reg_name: hvdef::HvRegisterName, reg_value: HvRegisterValue| { - let reg = hvdef::hypercall::HvRegisterAssoc { - name: reg_name, - pad: Default::default(), - value: reg_value, + let mut current_page = range.start_4k_gpn(); + while current_page < range.end_4k_gpn() { + let header = hvdef::hypercall::AcceptGpaPages { + partition_id: hvdef::HV_PARTITION_ID_SELF, + page_attributes: hvdef::hypercall::AcceptPagesAttributes::new() + .with_memory_type(memory_type.0) + .with_host_visibility(hvdef::hypercall::HostVisibilityType::PRIVATE) // no host visibility + .with_vtl_set(1 << 2), // applies vtl permissions for vtl 2 + vtl_permission_set: hvdef::hypercall::VtlPermissionSet { + vtl_permission_from_1: [0; hvdef::hypercall::HV_VTL_PERMISSION_SET_SIZE], + }, + gpa_page_base: current_page, }; - let _ = reg.write_to_prefix(&mut self.input_page().buffer[input_offset..]); - - input_offset += size_of::(); - count += 1; - }; - // pub msr_cr_pat: u64, - - write_reg( - HvX64RegisterName::Cr0.into(), - vp_context.unwrap().cr0.into(), - ); - write_reg( - HvX64RegisterName::Cr3.into(), - vp_context.unwrap().cr3.into(), - ); - write_reg( - HvX64RegisterName::Cr4.into(), - vp_context.unwrap().cr4.into(), - ); - write_reg( - HvX64RegisterName::Rip.into(), - vp_context.unwrap().rip.into(), - ); - write_reg( - HvX64RegisterName::Rsp.into(), - vp_context.unwrap().rsp.into(), - ); - write_reg( - HvX64RegisterName::Rflags.into(), - vp_context.unwrap().rflags.into(), - ); - write_reg( - HvX64RegisterName::Cs.into(), - vp_context.unwrap().cs.into(), - ); - write_reg( - HvX64RegisterName::Ss.into(), - vp_context.unwrap().ss.into(), - ); - write_reg( - HvX64RegisterName::Ds.into(), - vp_context.unwrap().ds.into(), - ); - write_reg( - HvX64RegisterName::Es.into(), - vp_context.unwrap().es.into(), - ); - write_reg( - HvX64RegisterName::Fs.into(), - vp_context.unwrap().fs.into(), - ); - write_reg( - HvX64RegisterName::Gs.into(), - vp_context.unwrap().gs.into(), - ); - write_reg( - HvX64RegisterName::Gdtr.into(), - vp_context.unwrap().gdtr.into(), - ); - write_reg( - HvX64RegisterName::Idtr.into(), - 
vp_context.unwrap().idtr.into(), - ); - write_reg( - HvX64RegisterName::Ldtr.into(), - vp_context.unwrap().ldtr.into(), - ); - write_reg( - HvX64RegisterName::Tr.into(), - vp_context.unwrap().tr.into(), - ); - write_reg( - HvX64RegisterName::Efer.into(), - vp_context.unwrap().efer.into(), - ); - - let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(count)); - - output.result() - } - - /// Hypercall for setting a register to a value. - pub fn set_register( - &mut self, - name: hvdef::HvRegisterName, - value: HvRegisterValue, - vtl: Option, - ) -> Result<(), hvdef::HvError> { - const HEADER_SIZE: usize = size_of::(); - - let header = hvdef::hypercall::GetSetVpRegisters { - partition_id: hvdef::HV_PARTITION_ID_SELF, - vp_index: hvdef::HV_VP_INDEX_SELF, - target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), - rsvd: [0; 3], - }; - - let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); - - let reg = hvdef::hypercall::HvRegisterAssoc { - name, - pad: Default::default(), - value, - }; - - let _ = reg.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); - - let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(1)); - - output.result() - } + let remaining_pages = range.end_4k_gpn() - current_page; + let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); - /// Hypercall for setting a register to a value. - pub fn get_register( - &mut self, - name: hvdef::HvRegisterName, - vtl: Option, - ) -> Result { - const HEADER_SIZE: usize = size_of::(); + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); - let header = hvdef::hypercall::GetSetVpRegisters { - partition_id: hvdef::HV_PARTITION_ID_SELF, - vp_index: hvdef::HV_VP_INDEX_SELF, - target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), - rsvd: [0; 3], - }; + let output = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallAcceptGpaPages, + Some(count as usize), + ); - let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); - let _ = name.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + output.result()?; - let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallGetVpRegisters, Some(1)); - output.result()?; - let value = HvRegisterValue::read_from_prefix(&self.output_page().buffer).unwrap(); + current_page += count; + } - Ok(value.0) + Ok(()) } /// Hypercall to apply vtl protections to the pages from address start to end @@ -453,77 +250,57 @@ impl HvCall { Ok(()) } - #[cfg(target_arch = "x86_64")] - /// Hypercall to get the current VTL VP context - pub fn get_current_vtl_vp_context(&mut self) -> Result { - use HvX64RegisterName; - use zerocopy::FromZeros; - let mut context: InitialVpContextX64 = FromZeros::new_zeroed(); - context.cr0 = self - .get_register(HvX64RegisterName::Cr0.into(), None)? - .as_u64(); - context.cr3 = self - .get_register(HvX64RegisterName::Cr3.into(), None)? - .as_u64(); - context.cr4 = self - .get_register(HvX64RegisterName::Cr4.into(), None)? - .as_u64(); - context.rip = self - .get_register(HvX64RegisterName::Rip.into(), None)? - .as_u64(); - context.rsp = self - .get_register(HvX64RegisterName::Rsp.into(), None)? - .as_u64(); - context.rflags = self - .get_register(HvX64RegisterName::Rflags.into(), None)? - .as_u64(); - context.cs = self - .get_register(HvX64RegisterName::Cs.into(), None)? - .as_segment(); - context.ss = self - .get_register(HvX64RegisterName::Ss.into(), None)? - .as_segment(); - context.ds = self - .get_register(HvX64RegisterName::Ds.into(), None)? 
- .as_segment(); - context.es = self - .get_register(HvX64RegisterName::Es.into(), None)? - .as_segment(); - context.fs = self - .get_register(HvX64RegisterName::Fs.into(), None)? - .as_segment(); - context.gs = self - .get_register(HvX64RegisterName::Gs.into(), None)? - .as_segment(); - context.gdtr = self - .get_register(HvX64RegisterName::Gdtr.into(), None)? - .as_table(); - context.idtr = self - .get_register(HvX64RegisterName::Idtr.into(), None)? - .as_table(); - context.tr = self - .get_register(HvX64RegisterName::Tr.into(), None)? - .as_segment(); - context.efer = self - .get_register(HvX64RegisterName::Efer.into(), None)? - .as_u64(); - Ok(context) - } + /// Makes a hypercall. + /// rep_count is Some for rep hypercalls + fn dispatch_hvcall( + &mut self, + code: hvdef::HypercallCode, + rep_count: Option, + ) -> hvdef::hypercall::HypercallOutput { + self.init_if_needed(); - pub fn vtl_call() { let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() - .with_code(hvdef::HypercallCode::HvCallVtlCall.0) - .with_rep_count(0); - invoke_hypercall_vtl(control); + .with_code(code.0) + .with_rep_count(rep_count.unwrap_or_default()); + + // SAFETY: Invoking hypercall per TLFS spec + unsafe { + invoke_hypercall( + control, + self.input_page().address(), + self.output_page().address(), + ) + } } - pub fn vtl_return() { - let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() - .with_code(hvdef::HypercallCode::HvCallVtlReturn.0) - .with_rep_count(0); - invoke_hypercall_vtl(control); + /// Enables a VTL for the specified partition. + pub fn enable_partition_vtl( + &mut self, + partition_id: u64, + target_vtl: Vtl, + ) -> Result<(), hvdef::HvError> { + let flags: EnablePartitionVtlFlags = EnablePartitionVtlFlags::new() + .with_enable_mbec(false) + .with_enable_supervisor_shadow_stack(false); + + let header = hvdef::hypercall::EnablePartitionVtl { + partition_id, + target_vtl: target_vtl.into(), + flags, + reserved_z0: 0, + reserved_z1: 0, + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnablePartitionVtl, None); + match output.result() { + Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), + err => err, + } } + /// Enables VTL protection for the specified VTL. pub fn enable_vtl_protection( &mut self, vtl: HvInputVtl, @@ -548,6 +325,7 @@ impl HvCall { } #[cfg(target_arch = "x86_64")] + /// Enables a VTL for a specific virtual processor (VP) on x86_64. 
pub fn enable_vp_vtl( &mut self, vp_index: u32, @@ -573,59 +351,6 @@ impl HvCall { } } - #[cfg(target_arch = "x86_64")] - pub fn start_virtual_processor( - &mut self, - vp_index: u32, - target_vtl: Vtl, - vp_context: Option, - ) -> Result<(), hvdef::HvError> { - let header = hvdef::hypercall::StartVirtualProcessorX64 { - partition_id: hvdef::HV_PARTITION_ID_SELF, - vp_index, - target_vtl: target_vtl.into(), - vp_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), - rsvd0: 0u8, - rsvd1: 0u16, - }; - - header - .write_to_prefix(self.input_page().buffer.as_mut_slice()) - .expect("size of start_virtual_processor header is not correct"); - - let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None); - match output.result() { - Ok(()) => Ok(()), - err => panic!("Failed to start virtual processor: {:?}", err), - } - } - - pub fn enable_partition_vtl( - &mut self, - partition_id: u64, - target_vtl: Vtl, - ) -> Result<(), hvdef::HvError> { - let flags: EnablePartitionVtlFlags = EnablePartitionVtlFlags::new() - .with_enable_mbec(false) - .with_enable_supervisor_shadow_stack(false); - - let header = hvdef::hypercall::EnablePartitionVtl { - partition_id, - target_vtl: target_vtl.into(), - flags, - reserved_z0: 0, - reserved_z1: 0, - }; - - let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); - - let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnablePartitionVtl, None); - match output.result() { - Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), - err => err, - } - } - /// Hypercall to enable VP VTL #[cfg(target_arch = "aarch64")] pub fn enable_vp_vtl(&mut self, vp_index: u32) -> Result<(), hvdef::HvError> { @@ -648,47 +373,86 @@ impl HvCall { } } - /// Hypercall to accept vtl2 pages from address start to end with VTL 2 - /// protections and no host visibility - #[cfg_attr(target_arch = "aarch64", allow(dead_code))] - pub fn accept_vtl2_pages( - &mut self, - range: MemoryRange, - memory_type: hvdef::hypercall::AcceptMemoryType, - ) -> Result<(), hvdef::HvError> { - const HEADER_SIZE: usize = size_of::(); - const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); - - let mut current_page = range.start_4k_gpn(); - while current_page < range.end_4k_gpn() { - let header = hvdef::hypercall::AcceptGpaPages { - partition_id: hvdef::HV_PARTITION_ID_SELF, - page_attributes: hvdef::hypercall::AcceptPagesAttributes::new() - .with_memory_type(memory_type.0) - .with_host_visibility(hvdef::hypercall::HostVisibilityType::PRIVATE) // no host visibility - .with_vtl_set(1 << 2), // applies vtl permissions for vtl 2 - vtl_permission_set: hvdef::hypercall::VtlPermissionSet { - vtl_permission_from_1: [0; hvdef::hypercall::HV_VTL_PERMISSION_SET_SIZE], - }, - gpa_page_base: current_page, - }; - - let remaining_pages = range.end_4k_gpn() - current_page; - let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); + #[cfg(target_arch = "x86_64")] + /// Hypercall to get the current VTL VP context + pub fn get_current_vtl_vp_context(&mut self) -> Result { + use HvX64RegisterName; + use zerocopy::FromZeros; + let mut context: InitialVpContextX64 = FromZeros::new_zeroed(); + context.cr0 = self + .get_register(HvX64RegisterName::Cr0.into(), None)? + .as_u64(); + context.cr3 = self + .get_register(HvX64RegisterName::Cr3.into(), None)? + .as_u64(); + context.cr4 = self + .get_register(HvX64RegisterName::Cr4.into(), None)? + .as_u64(); + context.rip = self + .get_register(HvX64RegisterName::Rip.into(), None)? 
+ .as_u64(); + context.rsp = self + .get_register(HvX64RegisterName::Rsp.into(), None)? + .as_u64(); + context.rflags = self + .get_register(HvX64RegisterName::Rflags.into(), None)? + .as_u64(); + context.cs = self + .get_register(HvX64RegisterName::Cs.into(), None)? + .as_segment(); + context.ss = self + .get_register(HvX64RegisterName::Ss.into(), None)? + .as_segment(); + context.ds = self + .get_register(HvX64RegisterName::Ds.into(), None)? + .as_segment(); + context.es = self + .get_register(HvX64RegisterName::Es.into(), None)? + .as_segment(); + context.fs = self + .get_register(HvX64RegisterName::Fs.into(), None)? + .as_segment(); + context.gs = self + .get_register(HvX64RegisterName::Gs.into(), None)? + .as_segment(); + context.gdtr = self + .get_register(HvX64RegisterName::Gdtr.into(), None)? + .as_table(); + context.idtr = self + .get_register(HvX64RegisterName::Idtr.into(), None)? + .as_table(); + context.tr = self + .get_register(HvX64RegisterName::Tr.into(), None)? + .as_segment(); + context.efer = self + .get_register(HvX64RegisterName::Efer.into(), None)? + .as_u64(); + Ok(context) + } - let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + /// Hypercall for setting a register to a value. + pub fn get_register( + &mut self, + name: hvdef::HvRegisterName, + vtl: Option, + ) -> Result { + const HEADER_SIZE: usize = size_of::(); - let output = self.dispatch_hvcall( - hvdef::HypercallCode::HvCallAcceptGpaPages, - Some(count as usize), - ); + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: hvdef::HV_VP_INDEX_SELF, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; - output.result()?; + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = name.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); - current_page += count; - } + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallGetVpRegisters, Some(1)); + output.result()?; + let value = HvRegisterValue::read_from_prefix(&self.output_page().buffer).unwrap(); - Ok(()) + Ok(value.0) } /// Get the corresponding VP indices from a list of VP hardware IDs (APIC @@ -736,6 +500,247 @@ impl HvCall { Ok(()) } + + /// Initializes the hypercall interface if it hasn't been already. + fn init_if_needed(&mut self) { + if !self.initialized { + self.initialize(); + } + } + + /// Initializes the hypercall interface. + pub fn initialize(&mut self) { + assert!(!self.initialized); + + // TODO: revisit os id value. For now, use 1 (which is what UEFI does) + let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1); + crate::arch::hypercall::initialize(guest_os_id.into()); + self.initialized = true; + } + + /// Returns a mutable reference to the hypercall input page. + fn input_page(&mut self) -> &mut HvcallPage { + &mut self.input_page + } + + /// Creates a new `HvCall` instance. + pub const fn new() -> Self { + HvCall { + initialized: false, + input_page: HvcallPage::new(), + output_page: HvcallPage::new(), + } + } + + /// Returns a mutable reference to the hypercall output page. + fn output_page(&mut self) -> &mut HvcallPage { + &mut self.output_page + } + + /// Hypercall for setting a register to a value. 
+ pub fn set_register( + &mut self, + name: hvdef::HvRegisterName, + value: HvRegisterValue, + vtl: Option, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: hvdef::HV_VP_INDEX_SELF, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let reg = hvdef::hypercall::HvRegisterAssoc { + name, + pad: Default::default(), + value, + }; + + let _ = reg.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(1)); + + output.result() + } + + /// Sets multiple virtual processor (VP) registers for a given VP and VTL. + pub fn set_vp_registers( + &mut self, + vp: u32, + vtl: Option, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: vp, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let mut input_offset = HEADER_SIZE; + + let mut count = 0; + let mut write_reg = |reg_name: hvdef::HvRegisterName, reg_value: HvRegisterValue| { + let reg = hvdef::hypercall::HvRegisterAssoc { + name: reg_name, + pad: Default::default(), + value: reg_value, + }; + + let _ = reg.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + + input_offset += size_of::(); + count += 1; + }; + // pub msr_cr_pat: u64, + + write_reg( + HvX64RegisterName::Cr0.into(), + vp_context.unwrap().cr0.into(), + ); + write_reg( + HvX64RegisterName::Cr3.into(), + vp_context.unwrap().cr3.into(), + ); + write_reg( + HvX64RegisterName::Cr4.into(), + vp_context.unwrap().cr4.into(), + ); + write_reg( + HvX64RegisterName::Rip.into(), + vp_context.unwrap().rip.into(), + ); + write_reg( + HvX64RegisterName::Rsp.into(), + vp_context.unwrap().rsp.into(), + ); + write_reg( + HvX64RegisterName::Rflags.into(), + vp_context.unwrap().rflags.into(), + ); + write_reg( + HvX64RegisterName::Cs.into(), + vp_context.unwrap().cs.into(), + ); + write_reg( + HvX64RegisterName::Ss.into(), + vp_context.unwrap().ss.into(), + ); + write_reg( + HvX64RegisterName::Ds.into(), + vp_context.unwrap().ds.into(), + ); + write_reg( + HvX64RegisterName::Es.into(), + vp_context.unwrap().es.into(), + ); + write_reg( + HvX64RegisterName::Fs.into(), + vp_context.unwrap().fs.into(), + ); + write_reg( + HvX64RegisterName::Gs.into(), + vp_context.unwrap().gs.into(), + ); + write_reg( + HvX64RegisterName::Gdtr.into(), + vp_context.unwrap().gdtr.into(), + ); + write_reg( + HvX64RegisterName::Idtr.into(), + vp_context.unwrap().idtr.into(), + ); + write_reg( + HvX64RegisterName::Ldtr.into(), + vp_context.unwrap().ldtr.into(), + ); + write_reg( + HvX64RegisterName::Tr.into(), + vp_context.unwrap().tr.into(), + ); + write_reg( + HvX64RegisterName::Efer.into(), + vp_context.unwrap().efer.into(), + ); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(count)); + + output.result() + } + + #[cfg(target_arch = "x86_64")] + /// Starts a virtual processor (VP) with the specified VTL and context on x86_64. 
+ pub fn start_virtual_processor( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::StartVirtualProcessorX64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + target_vtl: target_vtl.into(), + vp_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), + rsvd0: 0u8, + rsvd1: 0u16, + }; + + header + .write_to_prefix(self.input_page().buffer.as_mut_slice()) + .expect("size of start_virtual_processor header is not correct"); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None); + match output.result() { + Ok(()) => Ok(()), + err => panic!("Failed to start virtual processor: {:?}", err), + } + } + + /// Call before jumping to kernel. + pub fn uninitialize(&mut self) { + if self.initialized { + crate::arch::hypercall::uninitialize(); + self.initialized = false; + } + } + + /// Returns the environment's VTL. + pub fn vtl(&mut self) -> Vtl { + assert!(self.initialized); + self + .get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None) + .map_or(Vtl::Vtl0, |status| { + hvdef::HvRegisterVsmVpStatus::from(status.as_u64()) + .active_vtl() + .try_into() + .unwrap() + }) + } + + /// Invokes the HvCallVtlCall hypercall. + pub fn vtl_call() { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(hvdef::HypercallCode::HvCallVtlCall.0) + .with_rep_count(0); + invoke_hypercall_vtl(control); + } + + /// Invokes the HvCallVtlReturn hypercall. + pub fn vtl_return() { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(hvdef::HypercallCode::HvCallVtlReturn.0) + .with_rep_count(0); + invoke_hypercall_vtl(control); + } } /// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. 
This is the diff --git a/opentmk/opentmk/src/main.rs b/opentmk/opentmk/src/main.rs index 041ae6e702..dc49cd0347 100644 --- a/opentmk/opentmk/src/main.rs +++ b/opentmk/opentmk/src/main.rs @@ -6,6 +6,9 @@ #![doc = include_str!("../README.md")] +#![cfg_attr(all(not(test), target_os = "uefi"), no_main)] +#![cfg_attr(all(not(test), target_os = "uefi"), no_std)] + // Actual entrypoint is `uefi::uefi_main`, via the `#[entry]` macro #[cfg(any(test, not(target_os = "uefi")))] fn main() {} @@ -17,3 +20,5 @@ mod uefi; pub mod arch; pub mod tmk_assert; pub mod tmk_logger; +pub mod hypercall; +pub mod context; \ No newline at end of file diff --git a/opentmk/opentmk/src/uefi/hypvctx.rs b/opentmk/opentmk/src/uefi/hypvctx.rs index 26455dbc22..bf4979568e 100644 --- a/opentmk/opentmk/src/uefi/hypvctx.rs +++ b/opentmk/opentmk/src/uefi/hypvctx.rs @@ -1,4 +1,4 @@ -use super::{ +use crate::{ context::{TestCtxTrait, VpExecutor}, hypercall::HvCall, }; @@ -431,12 +431,12 @@ impl HvTestCtx { .expect("Failed to get VTL1 context"); let stack_layout = Layout::from_size_align(SIZE_1MB, 16) .expect("Failed to create layout for stack allocation"); - let x = unsafe { ALLOCATOR.alloc(stack_layout) }; - if x.is_null() { + let allocated_stack_ptr = unsafe { ALLOCATOR.alloc(stack_layout) }; + if allocated_stack_ptr.is_null() { return Err(false); } - let sz = stack_layout.size(); - let stack_top = x as u64 + sz as u64; + let stack_size = stack_layout.size(); + let stack_top = allocated_stack_ptr as u64 + stack_size as u64; let fn_ptr = func as fn(); let fn_address = fn_ptr as u64; vp_context.rip = fn_address; diff --git a/opentmk/opentmk/src/uefi/mod.rs b/opentmk/opentmk/src/uefi/mod.rs index ed2b2e0001..1a5e04d747 100644 --- a/opentmk/opentmk/src/uefi/mod.rs +++ b/opentmk/opentmk/src/uefi/mod.rs @@ -2,8 +2,6 @@ // Licensed under the MIT License. 
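The `uefi/mod.rs` hunk below drops the `context` and `hypercall` module declarations now that `main.rs` declares them at the crate root. As a rough illustration (not part of the patch), dependent code is expected to switch to root-level imports such as:

    use crate::context::{TestCtxTrait, VpExecutor};
    use crate::hypercall::HvCall;

which matches the path rewrites in the `hypvctx.rs`, `hv_misc.rs`, and `hv_processor.rs` hunks of this commit.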
mod alloc; -mod context; -pub mod hypercall; mod hypvctx; pub mod init; mod rt; mod tests; diff --git a/opentmk/opentmk/src/uefi/tests/hv_misc.rs b/opentmk/opentmk/src/uefi/tests/hv_misc.rs index 58e27531d6..9e4f27bdcd 100644 --- a/opentmk/opentmk/src/uefi/tests/hv_misc.rs +++ b/opentmk/opentmk/src/uefi/tests/hv_misc.rs @@ -6,7 +6,7 @@ use crate::tmk_assert::{AssertOption, AssertResult}; use sync_nostd::{Channel, Receiver, Sender}; use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB}; -use crate::uefi::{context, hypvctx}; +use crate::{context, uefi::hypvctx}; use crate::{tmk_assert}; use ::alloc::boxed::Box; use alloc::sync::Arc; diff --git a/opentmk/opentmk/src/uefi/tests/hv_processor.rs b/opentmk/opentmk/src/uefi/tests/hv_processor.rs index 6dec2ae748..b8fbee2ed0 100644 --- a/opentmk/opentmk/src/uefi/tests/hv_processor.rs +++ b/opentmk/opentmk/src/uefi/tests/hv_processor.rs @@ -2,7 +2,7 @@ use hvdef::Vtl; use sync_nostd::Channel; use crate::{ - tmk_assert, uefi::context::{TestCtxTrait, VpExecutor} + tmk_assert, context::{TestCtxTrait, VpExecutor} }; pub fn exec(ctx: &mut dyn TestCtxTrait) { From 2d2aec48583df98946f234a89bc97e59fd860300 Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Wed, 21 May 2025 06:42:46 +0000 Subject: [PATCH 08/10] refactor: errors for hyper-v error handling --- Cargo.lock | 11 +- opentmk/opentmk/src/context.rs | 104 ++++---- opentmk/opentmk/src/main.rs | 3 +- opentmk/opentmk/src/tmk_assert.rs | 102 ++++---- opentmk/opentmk/src/tmkdefs.rs | 93 +++++++ opentmk/opentmk/src/uefi/hypvctx.rs | 231 ++++++++++++------ opentmk/opentmk/src/uefi/mod.rs | 8 +- opentmk/opentmk/src/uefi/tests/hv_misc.rs | 11 +- .../opentmk/src/uefi/tests/hv_processor.rs | 44 +++- 9 files changed, 415 insertions(+), 192 deletions(-) create mode 100644 opentmk/opentmk/src/tmkdefs.rs diff --git a/Cargo.lock b/Cargo.lock index 302b3ed249..af26549104 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4791,14 +4791,16 @@ dependencies = [ "hvdef", "lazy_static", "linked_list_allocator", + "log", "memory_range", "minimal_rt", "minimal_rt_build", "serde", "serde_json", - "spin 0.10.0", + "sync_nostd", "uefi", "x86_64", + "x86defs", "zerocopy 0.8.14", ] @@ -6538,6 +6540,13 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_nostd" +version = "0.1.0" +dependencies = [ + "spin 0.10.0", +] + [[package]] name = "tap" version = "1.0.1" diff --git a/opentmk/opentmk/src/context.rs b/opentmk/opentmk/src/context.rs index da52e8ee32..80a8cd7890 100644 --- a/opentmk/opentmk/src/context.rs +++ b/opentmk/opentmk/src/context.rs @@ -4,77 +4,97 @@ use core::ops::Range; use alloc::boxed::Box; use hvdef::Vtl; +use crate::tmkdefs::TmkResult; + pub trait SecureInterceptPlatformTrait { - fn setup_secure_intercept(&mut self, interrupt_idx: u8); + fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()>; } pub trait InterruptPlatformTrait { - fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()); - fn setup_interrupt_handler(&mut self); + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()>; + fn setup_interrupt_handler(&mut self) -> TmkResult<()>; } pub trait MsrPlatformTrait { - fn read_msr(&mut self, msr: u32) -> u64; - fn write_msr(&mut self, msr: u32, value: u64); + fn read_msr(&mut self, msr: u32) -> TmkResult; + fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()>; } -pub trait VirtualProcessorlatformTrait { - fn get_register(&mut self, reg: u32) -> u128; - fn get_vp_count(&self) -> u32; - fn queue_command_vp(&mut self, cmd: VpExecutor); - fn start_on_vp(&mut 
self, cmd: VpExecutor); - fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor); +pub trait VirtualProcessorPlatformTrait where T: VtlPlatformTrait { + fn get_register(&mut self, reg: u32) -> TmkResult; + fn get_vp_count(&self) -> TmkResult; + fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; + fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; + fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) -> TmkResult<()>; } pub trait VtlPlatformTrait { - fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl); - fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl); - fn get_current_vtl(&self) -> Vtl; - fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl); - fn setup_partition_vtl(&mut self, vtl: Vtl); - fn setup_vtl_protection(&mut self); + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()>; + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + fn get_current_vtl(&self) -> TmkResult; + fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()>; + fn setup_vtl_protection(&mut self) -> TmkResult<()>; fn switch_to_high_vtl(&mut self); fn switch_to_low_vtl(&mut self); } +pub trait X64PlatformTrait {} +pub trait Aarch64PlatformTrait {} - - -pub trait TestCtxTrait { - +pub trait TestCtxTrait { // partition wide Traits + /// Returns the number of virtual processors (VPs) in the partition. fn get_vp_count(&self) -> u32; - fn setup_vtl_protection(&mut self); - fn setup_partition_vtl(&mut self, vtl: Vtl); - fn setup_interrupt_handler(&mut self); - fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()); - fn start_on_vp(&mut self, cmd: VpExecutor); - fn queue_command_vp(&mut self, cmd: VpExecutor); - fn setup_secure_intercept(&mut self, interrupt_idx: u8); - fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl); - fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl); - fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor); - fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl); - fn write_msr(&mut self, msr: u32, value: u64); - fn read_msr(&mut self, msr: u32) -> u64; + /// Sets up VTL (Virtualization Trust Level) protection for the partition. + fn setup_vtl_protection(&mut self) -> TmkResult<()>; + /// Sets up a specific VTL for the partition. + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()>; + /// Sets up the interrupt handler for the partition. + fn setup_interrupt_handler(&mut self) -> TmkResult<()>; + /// Sets the interrupt handler for a specific interrupt index. + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()>; + /// Starts a command on a specific virtual processor. + fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; + /// Queues a command to be executed on a virtual processor. + fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; + /// Sets up a secure intercept for a given interrupt index. + fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()>; + /// Applies VTL protection to a specified memory range. + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()>; + /// Sets the default context for a specific virtual processor and VTL. 
+ fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + /// Starts running a virtual processor with its default context. + fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor)-> TmkResult<()>; + /// Enables VTL for a virtual processor using its default context. + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl)-> TmkResult<()>; + /// Writes a value to a specific Model Specific Register (MSR). + fn write_msr(&mut self, msr: u32, value: u64)-> TmkResult<()>; + /// Reads a value from a specific Model Specific Register (MSR). + fn read_msr(&mut self, msr: u32) -> TmkResult; // per vp wide Traits - fn get_current_vp(&self) -> u32; - fn get_current_vtl(&self) -> Vtl; + /// Gets the index of the current virtual processor. + fn get_current_vp(&self) -> TmkResult; + /// Gets the current VTL of the virtual processor. + fn get_current_vtl(&self) -> TmkResult; + /// Switches the current virtual processor to a higher VTL. fn switch_to_high_vtl(&mut self); + /// Switches the current virtual processor to a lower VTL. fn switch_to_low_vtl(&mut self); - fn get_register(&mut self, reg: u32) -> u128; + /// Gets the value of a specific register for the current virtual processor. + fn get_register(&mut self, reg: u32) -> TmkResult; } -pub struct VpExecutor { +pub struct VpExecutor { vp_index: u32, vtl: Vtl, - cmd: Option>, + cmd: Option>, } -impl VpExecutor { +impl VpExecutor { pub fn new(vp_index: u32, vtl: Vtl) -> Self { VpExecutor { vp_index, @@ -83,12 +103,12 @@ impl VpExecutor { } } - pub fn command(mut self, cmd: impl FnOnce(&mut dyn TestCtxTrait) + 'static) -> Self { + pub fn command(mut self, cmd: impl FnOnce(&mut T) + 'static) -> Self { self.cmd = Some(Box::new(cmd)); self } - pub fn get(mut self) -> (u32, Vtl, Option>) { + pub fn get(mut self) -> (u32, Vtl, Option>) { let cmd = self.cmd.take(); (self.vp_index, self.vtl, cmd) } diff --git a/opentmk/opentmk/src/main.rs b/opentmk/opentmk/src/main.rs index dc49cd0347..f8ffeb4ab8 100644 --- a/opentmk/opentmk/src/main.rs +++ b/opentmk/opentmk/src/main.rs @@ -21,4 +21,5 @@ pub mod arch; pub mod tmk_assert; pub mod tmk_logger; pub mod hypercall; -pub mod context; \ No newline at end of file +pub mod context; +pub mod tmkdefs; \ No newline at end of file diff --git a/opentmk/opentmk/src/tmk_assert.rs b/opentmk/opentmk/src/tmk_assert.rs index dad6e46dfe..232ee7acb1 100644 --- a/opentmk/opentmk/src/tmk_assert.rs +++ b/opentmk/opentmk/src/tmk_assert.rs @@ -1,4 +1,4 @@ -use core::{any::type_name, fmt::Write}; +use core::fmt::Write; use alloc::string::{String, ToString}; use serde::Serialize; use serde_json::json; @@ -62,54 +62,54 @@ pub trait AssertOption { fn expect_assert(self, message: &str) -> T; } -impl AssertOption for Option { - fn expect_assert(self, message: &str) -> T { - match self { - Some(value) => value, - None => { - let call: &core::panic::Location<'_> = core::panic::Location::caller(); - let file_line = format!("{}:{}", call.file(), call.line()); - let expn = type_name::>(); - let js = format_assert_json_string(expn, true, file_line, false, &message); - write_str(&js); - panic!("Assertion failed: {}", message); - } - } - } -} +// impl AssertOption for Option { +// fn expect_assert(self, message: &str) -> T { +// match self { +// Some(value) => value, +// None => { +// let call: &core::panic::Location<'_> = core::panic::Location::caller(); +// let file_line = format!("{}:{}", call.file(), call.line()); +// let expn = type_name::>(); +// let js = format_assert_json_string(expn, 
true, file_line, false, &message); +// write_str(&js); +// panic!("Assertion failed: {}", message); +// } +// } +// } +// } -impl AssertResult for Result -where - E: core::fmt::Debug, -{ - fn unpack_assert(self) -> T { - match self { - Ok(value) => value, - Err(err) => { - let call: &core::panic::Location<'_> = core::panic::Location::caller(); - let file_line = format!("{}:{}", call.file(), call.line()); - let expn = type_name::>(); - let js = - format_assert_json_string(expn, true, file_line, false, &"ResultTest"); - write_str(&js); - panic!("Assertion failed: {:?}", err); - } - } - } - fn expect_assert(self, message: &str) -> T { - match self { - Ok(value) => { - log::info!("result is ok, condition not met for: {}", message); - value - } - Err(err) => { - let call: &core::panic::Location<'_> = core::panic::Location::caller(); - let file_line = format!("{}:{}", call.file(), call.line()); - let expn = type_name::>(); - let js = format_assert_json_string(expn, true, file_line, false, &message); - write_str(&js); - panic!("Assertion failed: {:?}", err); - } - } - } -} +// impl AssertResult for Result +// where +// E: core::fmt::Debug, +// { +// fn unpack_assert(self) -> T { +// match self { +// Ok(value) => value, +// Err(err) => { +// let call: &core::panic::Location<'_> = core::panic::Location::caller(); +// let file_line = format!("{}:{}", call.file(), call.line()); +// let expn = type_name::>(); +// let js = +// format_assert_json_string(expn, true, file_line, false, &"ResultTest"); +// write_str(&js); +// panic!("Assertion failed: {:?}", err); +// } +// } +// } +// fn expect_assert(self, message: &str) -> T { +// match self { +// Ok(value) => { +// log::info!("result is ok, condition not met for: {}", message); +// value +// } +// Err(err) => { +// let call: &core::panic::Location<'_> = core::panic::Location::caller(); +// let file_line = format!("{}:{}", call.file(), call.line()); +// let expn = type_name::>(); +// let js = format_assert_json_string(expn, true, file_line, false, &message); +// write_str(&js); +// panic!("Assertion failed: {:?}", err); +// } +// } +// } +// } diff --git a/opentmk/opentmk/src/tmkdefs.rs b/opentmk/opentmk/src/tmkdefs.rs new file mode 100644 index 0000000000..57f9faaa3f --- /dev/null +++ b/opentmk/opentmk/src/tmkdefs.rs @@ -0,0 +1,93 @@ +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum TmkErrorType { + AllocationFailed, + InvalidParameter, + EnableVtlFailed, + SetDefaultCtxFailed, + StartVpFailed, + QueueCommandFailed, + SetupVtlProtectionFailed, + SetupPartitionVtlFailed, + SetupInterruptHandlerFailed, + SetInterruptIdxFailed, + SetupSecureInterceptFailed, + ApplyVtlProtectionForMemoryFailed, + ReadMsrFailed, + WriteMsrFailed, + GetRegisterFailed, + InvalidHypercallCode, + InvalidHypercallInput, + InvalidAlignment, + AccessDenied, + InvalidPartitionState, + OperationDenied, + UnknownProperty, + PropertyValueOutOfRange, + InsufficientMemory, + PartitionTooDeep, + InvalidPartitionId, + InvalidVpIndex, + NotFound, + InvalidPortId, + InvalidConnectionId, + InsufficientBuffers, + NotAcknowledged, + InvalidVpState, + Acknowledged, + InvalidSaveRestoreState, + InvalidSynicState, + ObjectInUse, + InvalidProximityDomainInfo, + NoData, + Inactive, + NoResources, + FeatureUnavailable, + PartialPacket, + ProcessorFeatureNotSupported, + ProcessorCacheLineFlushSizeIncompatible, + InsufficientBuffer, + IncompatibleProcessor, + InsufficientDeviceDomains, + CpuidFeatureValidationError, + CpuidXsaveFeatureValidationError, + ProcessorStartupTimeout, + SmxEnabled, + 
InvalidLpIndex, + InvalidRegisterValue, + InvalidVtlState, + NxNotDetected, + InvalidDeviceId, + InvalidDeviceState, + PendingPageRequests, + PageRequestInvalid, + KeyAlreadyExists, + DeviceAlreadyInDomain, + InvalidCpuGroupId, + InvalidCpuGroupState, + OperationFailed, + NotAllowedWithNestedVirtActive, + InsufficientRootMemory, + EventBufferAlreadyFreed, + Timeout, + VtlAlreadyEnabled, + UnknownRegisterName, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct TmkError(pub TmkErrorType); + +pub type TmkResult = Result; + +impl core::error::Error for TmkError {} + +impl core::fmt::Display for TmkError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "TmkError({:?})", self.0) + } +} + +impl From for TmkError { + fn from(e: TmkErrorType) -> Self { + TmkError(e) + } +} diff --git a/opentmk/opentmk/src/uefi/hypvctx.rs b/opentmk/opentmk/src/uefi/hypvctx.rs index bf4979568e..c72dbd0183 100644 --- a/opentmk/opentmk/src/uefi/hypvctx.rs +++ b/opentmk/opentmk/src/uefi/hypvctx.rs @@ -1,13 +1,13 @@ use crate::{ - context::{TestCtxTrait, VpExecutor}, + context::{TestCtxTrait, VpExecutor, VtlPlatformTrait}, hypercall::HvCall, + tmkdefs::{TmkError, TmkErrorType, TmkResult}, }; use crate::uefi::alloc::ALLOCATOR; -use crate::tmk_assert::AssertResult; -use crate::tmk_assert::AssertOption; -use alloc::collections::btree_map::BTreeMap; + +use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use alloc::collections::linked_list::LinkedList; -use alloc::{boxed::Box, vec::Vec}; +use alloc::boxed::Box; use core::alloc::{GlobalAlloc, Layout}; use core::arch::asm; use core::ops::Range; @@ -20,7 +20,7 @@ use sync_nostd::Mutex; const ALIGNMENT: usize = 4096; type ComandTable = - BTreeMap, Vtl)>>; + BTreeMap, Vtl)>>; static mut CMD: Mutex = Mutex::new(BTreeMap::new()); #[allow(static_mut_refs)] @@ -40,7 +40,7 @@ fn register_command_queue(vp_index: u32) { pub struct HvTestCtx { pub hvcall: HvCall, - pub vp_runing: Vec<(u32, (bool, bool))>, + pub vp_runing: BTreeSet, pub my_vp_idx: u32, pub my_vtl: Vtl, } @@ -116,25 +116,22 @@ impl Drop for HvTestCtx { /// /// - `get_current_vtl(&self) -> Vtl`: /// Returns the current Virtual Trust Level (VTL). 
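The rewritten `impl` below threads `TmkResult` through each operation instead of panicking through `expect_assert`, and the `From<hvdef::HvError> for TmkError` conversion added at the end of this file lets hypercall failures propagate with `?`. A minimal sketch of the intended pattern (hypothetical helper, assuming `hvdef::Vtl`, `hvdef::HV_PARTITION_ID_SELF`, and this crate's `HvTestCtx`/`TmkResult` are in scope):

    fn enable_vtl1_on_partition(ctx: &mut HvTestCtx) -> TmkResult<()> {
        // The hvdef::HvError returned by the hypercall is converted into a TmkError by `?`.
        ctx.hvcall.enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, Vtl::Vtl1)?;
        Ok(())
    }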
-impl TestCtxTrait for HvTestCtx { - fn start_on_vp(&mut self, cmd: VpExecutor) { +impl TestCtxTrait for HvTestCtx { + fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> { let (vp_index, vtl, cmd) = cmd.get(); - let cmd = cmd.expect_assert("error: failed to get command as cmd is none"); + let cmd = cmd.ok_or_else(|| TmkError(TmkErrorType::InvalidParameter))?; if vtl >= Vtl::Vtl2 { panic!("error: can't run on vtl2"); } - let is_vp_running = self.vp_runing.iter_mut().find(|x| x.0 == vp_index); - + let is_vp_running = self.vp_runing.get(&vp_index); if let Some(_running_vtl) = is_vp_running { log::debug!("both vtl0 and vtl1 are running for VP: {:?}", vp_index); } else { if vp_index == 0 { let vp_context = self - .get_default_context() - .expect("error: failed to get default context"); + .get_default_context()?; self.hvcall - .enable_vp_vtl(0, Vtl::Vtl1, Some(vp_context)) - .expect("error: failed to enable vtl1"); + .enable_vp_vtl(0, Vtl::Vtl1, Some(vp_context))?; cmdt().lock().get_mut(&vp_index).unwrap().push_back(( Box::new(move |ctx| { @@ -143,20 +140,14 @@ impl TestCtxTrait for HvTestCtx { Vtl::Vtl1, )); self.switch_to_high_vtl(); - self.vp_runing.push((vp_index, (true, true))); + self.vp_runing.insert(vp_index); } else { cmdt().lock().get_mut(&self.my_vp_idx).unwrap().push_back(( Box::new(move |ctx| { - ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1); - ctx.start_running_vp_with_default_context(VpExecutor::new( + _ = ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1); + _ = ctx.start_running_vp_with_default_context(VpExecutor::new( vp_index, - Vtl::Vtl1, - )); - cmdt().lock().get_mut(&vp_index).unwrap().push_back(( - Box::new(move |ctx| { - ctx.set_default_ctx_to_vp(vp_index, Vtl::Vtl0); - }), - Vtl::Vtl1, + Vtl::Vtl0, )); ctx.switch_to_low_vtl(); }), @@ -164,7 +155,7 @@ impl TestCtxTrait for HvTestCtx { )); self.switch_to_high_vtl(); - self.vp_runing.push((vp_index, (true, true))); + self.vp_runing.insert(vp_index); } } cmdt() @@ -179,17 +170,19 @@ impl TestCtxTrait for HvTestCtx { self.switch_to_high_vtl(); } } + Ok(()) } - fn queue_command_vp(&mut self, cmd: VpExecutor) { + fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> { let (vp_index, vtl, cmd) = cmd.get(); let cmd = - cmd.expect_assert("error: failed to get command as cmd is none with queue command vp"); + cmd.ok_or_else(|| TmkError(TmkErrorType::QueueCommandFailed))?; cmdt() .lock() .get_mut(&vp_index) .unwrap() .push_back((cmd, vtl)); + Ok(()) } fn switch_to_high_vtl(&mut self) { @@ -200,27 +193,27 @@ impl TestCtxTrait for HvTestCtx { HvCall::vtl_return(); } - fn setup_partition_vtl(&mut self, vtl: Vtl) { + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()> { self.hvcall - .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl) - .expect_assert("Failed to enable VTL1 for the partition"); + .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl)?; log::info!("enabled vtl protections for the partition."); + Ok(()) } - fn setup_interrupt_handler(&mut self) { + fn setup_interrupt_handler(&mut self) -> TmkResult<()> { crate::arch::interrupt::init(); + Ok(()) } - fn setup_vtl_protection(&mut self) { + fn setup_vtl_protection(&mut self) -> TmkResult<()> { self.hvcall - .enable_vtl_protection(HvInputVtl::CURRENT_VTL) - .expect_assert("Failed to enable VTL protection, vtl1"); + .enable_vtl_protection(HvInputVtl::CURRENT_VTL)?; log::info!("enabled vtl protections for the partition."); + Ok(()) } - fn setup_secure_intercept(&mut self, interrupt_idx: u8) { - let layout = 
Layout::from_size_align(4096, ALIGNMENT) - .expect_assert("error: failed to create layout for SIMP page"); + fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()> { + let layout = Layout::from_size_align(4096, ALIGNMENT).or_else(|_| Err(TmkError(TmkErrorType::AllocationFailed)))?; let ptr = unsafe { ALLOCATOR.alloc(layout) }; let gpn = (ptr as u64) >> 12; @@ -235,43 +228,44 @@ impl TestCtxTrait for HvTestCtx { reg.set_masked(false); reg.set_auto_eoi(true); - self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into()); + self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into())?; log::info!("Successfuly set the SINT0 register."); + Ok(()) } - fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) { + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()> { self.hvcall - .apply_vtl_protections(MemoryRange::new(range), vtl) - .expect_assert("Failed to apply VTL protections"); + .apply_vtl_protections(MemoryRange::new(range), vtl)?; + Ok(()) } - fn write_msr(&mut self, msr: u32, value: u64) { + fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()> { unsafe { write_msr(msr, value) }; + Ok(()) } - fn read_msr(&mut self, msr: u32) -> u64 { - unsafe { read_msr(msr) } + fn read_msr(&mut self, msr: u32) -> TmkResult { + let r = unsafe { read_msr(msr) }; + Ok(r) } - fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) { + fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) -> TmkResult<()> { let (vp_index, vtl, _cmd) = cmd.get(); let vp_ctx = self - .get_default_context() - .expect_assert("error: failed to get default context"); + .get_default_context()?; self.hvcall - .start_virtual_processor(vp_index, vtl, Some(vp_ctx)) - .expect_assert("error: failed to start vp"); + .start_virtual_processor(vp_index, vtl, Some(vp_ctx))?; + Ok(()) } - fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) { + fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { let i: u8 = match vtl { Vtl::Vtl0 => 0, Vtl::Vtl1 => 1, Vtl::Vtl2 => 2, }; let vp_context = self - .get_default_context() - .expect_assert("error: failed to get default context"); + .get_default_context()?; self.hvcall .set_vp_registers( vp_index, @@ -281,22 +275,22 @@ impl TestCtxTrait for HvTestCtx { .with_use_target_vtl(true), ), Some(vp_context), - ) - .expect_assert("error: failed to set vp registers"); + )?; + Ok(()) } - fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) { + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { let vp_ctx = self - .get_default_context() - .expect_assert("error: failed to get default context"); + .get_default_context()?; self.hvcall - .enable_vp_vtl(vp_index, vtl, Some(vp_ctx)) - .expect_assert("error: failed to enable vp vtl"); + .enable_vp_vtl(vp_index, vtl, Some(vp_ctx))?; + Ok(()) } #[cfg(target_arch = "x86_64")] - fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) { + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()> { crate::arch::interrupt::set_handler(interrupt_idx, handler); + Ok(()) } #[cfg(target_arch = "x86_64")] @@ -322,33 +316,33 @@ impl TestCtxTrait for HvTestCtx { } #[cfg(target_arch = "x86_64")] - fn get_register(&mut self, reg: u32) -> u128 { + fn get_register(&mut self, reg: u32) -> TmkResult { use hvdef::HvX64RegisterName; let reg = HvX64RegisterName(reg); - self.hvcall - .get_register(reg.into(), None) - .expect_assert("error: failed to get register") - .as_u128() + 
let val = self.hvcall + .get_register(reg.into(), None)? + .as_u128(); + Ok(val) } #[cfg(target_arch = "aarch64")] - fn get_register(&mut self, reg: u32) -> u128 { + fn get_register(&mut self, reg: u32) -> TmkResult { use hvdef::HvAarch64RegisterName; let reg = HvAarch64RegisterName(reg); - self.hvcall - .get_register(reg.into(), None) - .expect_assert("error: failed to get register") - .as_u128() + let val = self.hvcall + .get_register(reg.into(), None)? + .as_u128(); + Ok(val) } - fn get_current_vp(&self) -> u32 { - self.my_vp_idx + fn get_current_vp(&self) -> TmkResult { + Ok(self.my_vp_idx) } - fn get_current_vtl(&self) -> Vtl { - self.my_vtl + fn get_current_vtl(&self) -> TmkResult { + Ok(self.my_vtl) } } @@ -356,7 +350,7 @@ impl HvTestCtx { pub const fn new() -> Self { HvTestCtx { hvcall: HvCall::new(), - vp_runing: Vec::new(), + vp_runing: BTreeSet::new(), my_vp_idx: 0, my_vtl: Vtl::Vtl0, } @@ -383,7 +377,7 @@ impl HvTestCtx { loop { let mut vtl: Option = None; - let mut cmd: Option> = None; + let mut cmd: Option> = None; { let mut cmdt = cmdt().lock(); @@ -417,12 +411,12 @@ impl HvTestCtx { } #[cfg(target_arch = "x86_64")] - fn get_default_context(&mut self) -> Result { + fn get_default_context(&mut self) -> Result { return self.run_fn_with_current_context(HvTestCtx::exec_handler); } #[cfg(target_arch = "x86_64")] - fn run_fn_with_current_context(&mut self, func: fn()) -> Result { + fn run_fn_with_current_context(&mut self, func: fn()) -> Result { use super::alloc::SIZE_1MB; let mut vp_context: InitialVpContextX64 = self @@ -433,7 +427,7 @@ impl HvTestCtx { .expect("Failed to create layout for stack allocation"); let allocated_stack_ptr = unsafe { ALLOCATOR.alloc(stack_layout) }; if allocated_stack_ptr.is_null() { - return Err(false); + return Err(TmkErrorType::AllocationFailed.into()); } let stack_size = stack_layout.size(); let stack_top = allocated_stack_ptr as u64 + stack_size as u64; @@ -444,3 +438,78 @@ impl HvTestCtx { Ok(vp_context) } } + +impl From for TmkError { + fn from(e: hvdef::HvError) -> Self { + log::debug!("Converting hvdef::HvError::{:?} to TmkError", e); + let tmk_error_type = match e { + hvdef::HvError::InvalidHypercallCode => TmkErrorType::InvalidHypercallCode, + hvdef::HvError::InvalidHypercallInput => TmkErrorType::InvalidHypercallInput, + hvdef::HvError::InvalidAlignment => TmkErrorType::InvalidAlignment, + hvdef::HvError::InvalidParameter => TmkErrorType::InvalidParameter, + hvdef::HvError::AccessDenied => TmkErrorType::AccessDenied, + hvdef::HvError::InvalidPartitionState => TmkErrorType::InvalidPartitionState, + hvdef::HvError::OperationDenied => TmkErrorType::OperationDenied, + hvdef::HvError::UnknownProperty => TmkErrorType::UnknownProperty, + hvdef::HvError::PropertyValueOutOfRange => TmkErrorType::PropertyValueOutOfRange, + hvdef::HvError::InsufficientMemory => TmkErrorType::InsufficientMemory, + hvdef::HvError::PartitionTooDeep => TmkErrorType::PartitionTooDeep, + hvdef::HvError::InvalidPartitionId => TmkErrorType::InvalidPartitionId, + hvdef::HvError::InvalidVpIndex => TmkErrorType::InvalidVpIndex, + hvdef::HvError::NotFound => TmkErrorType::NotFound, + hvdef::HvError::InvalidPortId => TmkErrorType::InvalidPortId, + hvdef::HvError::InvalidConnectionId => TmkErrorType::InvalidConnectionId, + hvdef::HvError::InsufficientBuffers => TmkErrorType::InsufficientBuffers, + hvdef::HvError::NotAcknowledged => TmkErrorType::NotAcknowledged, + hvdef::HvError::InvalidVpState => TmkErrorType::InvalidVpState, + hvdef::HvError::Acknowledged => 
TmkErrorType::Acknowledged, + hvdef::HvError::InvalidSaveRestoreState => TmkErrorType::InvalidSaveRestoreState, + hvdef::HvError::InvalidSynicState => TmkErrorType::InvalidSynicState, + hvdef::HvError::ObjectInUse => TmkErrorType::ObjectInUse, + hvdef::HvError::InvalidProximityDomainInfo => TmkErrorType::InvalidProximityDomainInfo, + hvdef::HvError::NoData => TmkErrorType::NoData, + hvdef::HvError::Inactive => TmkErrorType::Inactive, + hvdef::HvError::NoResources => TmkErrorType::NoResources, + hvdef::HvError::FeatureUnavailable => TmkErrorType::FeatureUnavailable, + hvdef::HvError::PartialPacket => TmkErrorType::PartialPacket, + hvdef::HvError::ProcessorFeatureNotSupported => TmkErrorType::ProcessorFeatureNotSupported, + hvdef::HvError::ProcessorCacheLineFlushSizeIncompatible => TmkErrorType::ProcessorCacheLineFlushSizeIncompatible, + hvdef::HvError::InsufficientBuffer => TmkErrorType::InsufficientBuffer, + hvdef::HvError::IncompatibleProcessor => TmkErrorType::IncompatibleProcessor, + hvdef::HvError::InsufficientDeviceDomains => TmkErrorType::InsufficientDeviceDomains, + hvdef::HvError::CpuidFeatureValidationError => TmkErrorType::CpuidFeatureValidationError, + hvdef::HvError::CpuidXsaveFeatureValidationError => TmkErrorType::CpuidXsaveFeatureValidationError, + hvdef::HvError::ProcessorStartupTimeout => TmkErrorType::ProcessorStartupTimeout, + hvdef::HvError::SmxEnabled => TmkErrorType::SmxEnabled, + hvdef::HvError::InvalidLpIndex => TmkErrorType::InvalidLpIndex, + hvdef::HvError::InvalidRegisterValue => TmkErrorType::InvalidRegisterValue, + hvdef::HvError::InvalidVtlState => TmkErrorType::InvalidVtlState, + hvdef::HvError::NxNotDetected => TmkErrorType::NxNotDetected, + hvdef::HvError::InvalidDeviceId => TmkErrorType::InvalidDeviceId, + hvdef::HvError::InvalidDeviceState => TmkErrorType::InvalidDeviceState, + hvdef::HvError::PendingPageRequests => TmkErrorType::PendingPageRequests, + hvdef::HvError::PageRequestInvalid => TmkErrorType::PageRequestInvalid, + hvdef::HvError::KeyAlreadyExists => TmkErrorType::KeyAlreadyExists, + hvdef::HvError::DeviceAlreadyInDomain => TmkErrorType::DeviceAlreadyInDomain, + hvdef::HvError::InvalidCpuGroupId => TmkErrorType::InvalidCpuGroupId, + hvdef::HvError::InvalidCpuGroupState => TmkErrorType::InvalidCpuGroupState, + hvdef::HvError::OperationFailed => TmkErrorType::OperationFailed, + hvdef::HvError::NotAllowedWithNestedVirtActive => TmkErrorType::NotAllowedWithNestedVirtActive, + hvdef::HvError::InsufficientRootMemory => TmkErrorType::InsufficientRootMemory, + hvdef::HvError::EventBufferAlreadyFreed => TmkErrorType::EventBufferAlreadyFreed, + hvdef::HvError::Timeout => TmkErrorType::Timeout, + hvdef::HvError::VtlAlreadyEnabled => TmkErrorType::VtlAlreadyEnabled, + hvdef::HvError::UnknownRegisterName => TmkErrorType::UnknownRegisterName, + // Add any other specific mappings here if hvdef::HvError has more variants + _ => { + log::warn!( + "Unhandled hvdef::HvError variant: {:?}. 
Mapping to TmkErrorType::OperationFailed.", + e + ); + TmkErrorType::OperationFailed // Generic fallback + } + }; + log::debug!("Mapped hvdef::HvError::{:?} to TmkErrorType::{:?}", e, tmk_error_type); + TmkError(tmk_error_type) + } +} \ No newline at end of file diff --git a/opentmk/opentmk/src/uefi/mod.rs b/opentmk/opentmk/src/uefi/mod.rs index 1a5e04d747..a1e677259d 100644 --- a/opentmk/opentmk/src/uefi/mod.rs +++ b/opentmk/opentmk/src/uefi/mod.rs @@ -7,14 +7,18 @@ pub mod init; mod rt; mod tests; -use crate::tmk_assert::AssertResult; +use crate::tmk_assert; use init::init; use uefi::entry; use uefi::Status; #[entry] fn uefi_main() -> Status { - init().expect_assert("Failed to initialize environment"); + let r= init(); + tmk_assert!(r.is_ok(), "init should succeed"); + + log::warn!("TEST_START"); tests::run_test(); + log::warn!("TEST_END"); Status::SUCCESS } diff --git a/opentmk/opentmk/src/uefi/tests/hv_misc.rs b/opentmk/opentmk/src/uefi/tests/hv_misc.rs index 9e4f27bdcd..681d8baa3b 100644 --- a/opentmk/opentmk/src/uefi/tests/hv_misc.rs +++ b/opentmk/opentmk/src/uefi/tests/hv_misc.rs @@ -4,6 +4,7 @@ // This test is to verify that the VTL protections are working as expected. // The stack values in VTL0 are changing after interrupt handling in VTL1. use crate::tmk_assert::{AssertOption, AssertResult}; +use crate::tmkdefs::TmkResult; use sync_nostd::{Channel, Receiver, Sender}; use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB}; use crate::{context, uefi::hypvctx}; @@ -26,7 +27,7 @@ use uefi::Status; static mut HEAPX: RefCell<*mut u8> = RefCell::new(0 as *mut u8); static mut CON: AtomicI32 = AtomicI32::new(0); -pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { +pub fn exec(ctx: &mut T) where T: TestCtxTrait { log::info!("ctx ptr: {:p}", &ctx as *const _); let mut vp_count = ctx.get_vp_count(); @@ -39,7 +40,7 @@ pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { ctx.setup_partition_vtl(Vtl::Vtl1); ctx.start_on_vp( - VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut dyn TestCtxTrait| { + VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T|{ log::info!("successfully started running VTL1 on vp0."); ctx.setup_secure_intercept(0x30); ctx.set_interrupt_idx(0x30, || { @@ -49,7 +50,9 @@ pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { hv_test_ctx.init(); let c = hv_test_ctx.get_register(HvAllArchRegisterName::VsmVpStatus.0); - + tmk_assert!(c.is_ok(), "read should succeed"); + + let c = c.unwrap(); let cp = HvRegisterVsmVpStatus::from_bits(c as u64); log::info!("VSM VP Status: {:?}", cp); @@ -85,7 +88,7 @@ pub fn exec(ctx: &mut hypvctx::HvTestCtx ) { }), ); - ctx.queue_command_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx| { + ctx.queue_command_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { log::info!("successfully started running VTL1 on vp0."); ctx.switch_to_low_vtl(); })); diff --git a/opentmk/opentmk/src/uefi/tests/hv_processor.rs b/opentmk/opentmk/src/uefi/tests/hv_processor.rs index b8fbee2ed0..7ef48d0f1f 100644 --- a/opentmk/opentmk/src/uefi/tests/hv_processor.rs +++ b/opentmk/opentmk/src/uefi/tests/hv_processor.rs @@ -5,29 +5,39 @@ use crate::{ tmk_assert, context::{TestCtxTrait, VpExecutor} }; -pub fn exec(ctx: &mut dyn TestCtxTrait) { - ctx.setup_interrupt_handler(); - ctx.setup_partition_vtl(Vtl::Vtl1); +pub fn exec(ctx: &mut T) where T: TestCtxTrait { + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + let 
vp_count = ctx.get_vp_count(); tmk_assert!(vp_count == 8, "vp count should be 8"); // Testing BSP VTL Bringup { let (tx, rx) = Channel::new().split(); - ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command( - move |ctx: &mut dyn TestCtxTrait| { + let result = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command( + move |ctx: &mut T| { let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); log::info!("vp: {}", vp); tmk_assert!(vp == 0, "vp should be equal to 0"); let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); log::info!("vtl: {:?}", vtl); tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); - _ = tx.send(()); + tx.send(()).expect("Failed to send message through the channel"); ctx.switch_to_low_vtl(); }, )); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); _ = rx.recv(); } @@ -35,36 +45,50 @@ pub fn exec(ctx: &mut dyn TestCtxTrait) { // Testing VTL1 { let (tx, rx) = Channel::new().split(); - ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command( - move |ctx: &mut dyn TestCtxTrait| { + let result = ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command( + move |ctx: &mut T| { let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); log::info!("vp: {}", vp); tmk_assert!(vp == i, format!("vp should be equal to {}", i)); let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); log::info!("vtl: {:?}", vtl); tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl1 for VP {}", i)); _ = tx.send(()); }, )); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); _ = rx.recv(); } // Testing VTL0 { let (tx, rx) = Channel::new().split(); - ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command( - move |ctx: &mut dyn TestCtxTrait| { + let result = ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command( + move |ctx: &mut T| { let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); log::info!("vp: {}", vp); tmk_assert!(vp == i, format!("vp should be equal to {}", i)); let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); log::info!("vtl: {:?}", vtl); tmk_assert!(vtl == Vtl::Vtl0, format!("vtl should be Vtl0 for VP {}", i)); _ = tx.send(()); }, )); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); _ = rx.recv(); } } From 4cd03a596f364cc6f519685b0b8d202d75447daa Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Wed, 21 May 2025 07:26:08 +0000 Subject: [PATCH 09/10] feat: add test for a negitive case where parition has not enabled VTL1 --- opentmk/opentmk/src/context.rs | 46 +- opentmk/opentmk/src/uefi/hypvctx.rs | 456 +++++++++--------- opentmk/opentmk/src/uefi/rt.rs | 4 +- .../src/uefi/tests/hv_error_vp_start.rs | 48 ++ opentmk/opentmk/src/uefi/tests/hv_misc.rs | 107 ++-- .../opentmk/src/uefi/tests/hv_processor.rs | 73 +-- opentmk/opentmk/src/uefi/tests/mod.rs | 6 +- 7 files changed, 373 insertions(+), 367 deletions(-) create mode 100644 opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs diff --git a/opentmk/opentmk/src/context.rs b/opentmk/opentmk/src/context.rs index 80a8cd7890..304907f221 100644 --- a/opentmk/opentmk/src/context.rs +++ b/opentmk/opentmk/src/context.rs @@ -22,6 +22,7 @@ pub trait MsrPlatformTrait { } pub trait VirtualProcessorPlatformTrait where T: VtlPlatformTrait { + fn get_current_vp(&self) -> TmkResult; fn get_register(&mut 
self, reg: u32) -> TmkResult; fn get_vp_count(&self) -> TmkResult; fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; @@ -43,51 +44,6 @@ pub trait VtlPlatformTrait { pub trait X64PlatformTrait {} pub trait Aarch64PlatformTrait {} -pub trait TestCtxTrait { - // partition wide Traits - /// Returns the number of virtual processors (VPs) in the partition. - fn get_vp_count(&self) -> u32; - /// Sets up VTL (Virtualization Trust Level) protection for the partition. - fn setup_vtl_protection(&mut self) -> TmkResult<()>; - /// Sets up a specific VTL for the partition. - fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()>; - /// Sets up the interrupt handler for the partition. - fn setup_interrupt_handler(&mut self) -> TmkResult<()>; - /// Sets the interrupt handler for a specific interrupt index. - fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()>; - /// Starts a command on a specific virtual processor. - fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; - /// Queues a command to be executed on a virtual processor. - fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; - /// Sets up a secure intercept for a given interrupt index. - fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()>; - /// Applies VTL protection to a specified memory range. - fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()>; - /// Sets the default context for a specific virtual processor and VTL. - fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; - /// Starts running a virtual processor with its default context. - fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor)-> TmkResult<()>; - /// Enables VTL for a virtual processor using its default context. - fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl)-> TmkResult<()>; - /// Writes a value to a specific Model Specific Register (MSR). - fn write_msr(&mut self, msr: u32, value: u64)-> TmkResult<()>; - /// Reads a value from a specific Model Specific Register (MSR). - fn read_msr(&mut self, msr: u32) -> TmkResult; - - // per vp wide Traits - /// Gets the index of the current virtual processor. - fn get_current_vp(&self) -> TmkResult; - /// Gets the current VTL of the virtual processor. - fn get_current_vtl(&self) -> TmkResult; - /// Switches the current virtual processor to a higher VTL. - fn switch_to_high_vtl(&mut self); - /// Switches the current virtual processor to a lower VTL. - fn switch_to_low_vtl(&mut self); - /// Gets the value of a specific register for the current virtual processor. 
- fn get_register(&mut self, reg: u32) -> TmkResult; - -} - pub struct VpExecutor { vp_index: u32, vtl: Vtl, diff --git a/opentmk/opentmk/src/uefi/hypvctx.rs b/opentmk/opentmk/src/uefi/hypvctx.rs index c72dbd0183..8ed85a2a3d 100644 --- a/opentmk/opentmk/src/uefi/hypvctx.rs +++ b/opentmk/opentmk/src/uefi/hypvctx.rs @@ -1,13 +1,16 @@ +use crate::uefi::alloc::ALLOCATOR; use crate::{ - context::{TestCtxTrait, VpExecutor, VtlPlatformTrait}, + context::{ + InterruptPlatformTrait, MsrPlatformTrait, SecureInterceptPlatformTrait, + VirtualProcessorPlatformTrait, VpExecutor, VtlPlatformTrait, + }, hypercall::HvCall, tmkdefs::{TmkError, TmkErrorType, TmkResult}, }; -use crate::uefi::alloc::ALLOCATOR; -use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use alloc::collections::linked_list::LinkedList; use alloc::boxed::Box; +use alloc::collections::linked_list::LinkedList; +use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use core::alloc::{GlobalAlloc, Layout}; use core::arch::asm; use core::ops::Range; @@ -19,8 +22,7 @@ use sync_nostd::Mutex; const ALIGNMENT: usize = 4096; -type ComandTable = - BTreeMap, Vtl)>>; +type ComandTable = BTreeMap, Vtl)>>; static mut CMD: Mutex = Mutex::new(BTreeMap::new()); #[allow(static_mut_refs)] @@ -29,13 +31,13 @@ fn cmdt() -> &'static Mutex { } fn register_command_queue(vp_index: u32) { - log::debug!("registering command queue for vp: {}", vp_index); - if cmdt().lock().get(&vp_index).is_none() { - cmdt().lock().insert(vp_index, LinkedList::new()); - log::debug!("registered command queue for vp: {}", vp_index); - } else { - log::debug!("command queue already registered for vp: {}", vp_index); - } + log::debug!("registering command queue for vp: {}", vp_index); + if cmdt().lock().get(&vp_index).is_none() { + cmdt().lock().insert(vp_index, LinkedList::new()); + log::debug!("registered command queue for vp: {}", vp_index); + } else { + log::debug!("command queue already registered for vp: {}", vp_index); + } } pub struct HvTestCtx { @@ -51,87 +53,144 @@ impl Drop for HvTestCtx { } } -/// Implementation of the `TestCtxTrait` for the `HvTestCtx` structure, providing -/// various methods to manage and interact with virtual processors (VPs) and -/// Virtual Trust Levels (VTLs) in a hypervisor context. -/// -/// # Methods -/// -/// - `start_on_vp(&mut self, cmd: VpExecutor)`: -/// Starts a virtual processor (VP) on a specified VTL. Handles enabling VTLs, -/// switching between high and low VTLs, and managing VP execution contexts. -/// -/// - `queue_command_vp(&mut self, cmd: VpExecutor)`: -/// Queues a command for a specific VP and VTL. -/// -/// - `switch_to_high_vtl(&mut self)`: -/// Switches the current execution context to a high VTL. -/// -/// - `switch_to_low_vtl(&mut self)`: -/// Switches the current execution context to a low VTL. -/// -/// - `setup_partition_vtl(&mut self, vtl: Vtl)`: -/// Configures the partition to enable a specified VTL. -/// -/// - `setup_interrupt_handler(&mut self)`: -/// Sets up the interrupt handler for the architecture. -/// -/// - `setup_vtl_protection(&mut self)`: -/// Enables VTL protection for the current partition. -/// -/// - `setup_secure_intercept(&mut self, interrupt_idx: u8)`: -/// Configures secure intercept for a specified interrupt index, including -/// setting up the SIMP and SINT0 registers. -/// -/// - `apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl)`: -/// Applies VTL protections to a specified memory range. 
-/// -/// - `write_msr(&mut self, msr: u32, value: u64)`: -/// Writes a value to a specified Model-Specific Register (MSR). -/// -/// - `read_msr(&mut self, msr: u32) -> u64`: -/// Reads the value of a specified Model-Specific Register (MSR). -/// -/// - `start_running_vp_with_default_context(&mut self, cmd: VpExecutor)`: -/// Starts a VP with the default execution context. -/// -/// - `set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl)`: -/// Sets the default execution context for a specified VP and VTL. -/// -/// - `enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl)`: -/// Enables a VTL for a specified VP using the default execution context. -/// -/// - `set_interupt_idx(&mut self, interrupt_idx: u8, handler: fn())`: -/// Sets an interrupt handler for a specified interrupt index. (x86_64 only) -/// -/// - `get_vp_count(&self) -> u32`: -/// Retrieves the number of virtual processors available on the system. -/// -/// - `get_register(&mut self, reg: u32) -> u128`: -/// Retrieves the value of a specified register. Supports both x86_64 and -/// aarch64 architectures. -/// -/// - `get_current_vp(&self) -> u32`: -/// Returns the index of the current virtual processor. -/// -/// - `get_current_vtl(&self) -> Vtl`: -/// Returns the current Virtual Trust Level (VTL). -impl TestCtxTrait for HvTestCtx { + +impl SecureInterceptPlatformTrait for HvTestCtx { + fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()> { + let layout = Layout::from_size_align(4096, ALIGNMENT) + .or_else(|_| Err(TmkError(TmkErrorType::AllocationFailed)))?; + + let ptr = unsafe { ALLOCATOR.alloc(layout) }; + let gpn = (ptr as u64) >> 12; + let reg = (gpn << 12) | 0x1; + + unsafe { write_msr(hvdef::HV_X64_MSR_SIMP, reg.into()) }; + log::info!("Successfuly set the SIMP register."); + + let reg = unsafe { read_msr(hvdef::HV_X64_MSR_SINT0) }; + let mut reg: hvdef::HvSynicSint = reg.into(); + reg.set_vector(interrupt_idx); + reg.set_masked(false); + reg.set_auto_eoi(true); + + self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into())?; + log::info!("Successfuly set the SINT0 register."); + Ok(()) + } +} + + + +impl InterruptPlatformTrait for HvTestCtx { + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()> { + #[cfg(target_arch = "x86_64")] + { + crate::arch::interrupt::set_handler(interrupt_idx, handler); + Ok(()) + } + + #[cfg(not(target_arch = "x86_64"))] + { + Err(TmkError(TmkErrorType::NotImplemented)) + } + } + + fn setup_interrupt_handler(&mut self) -> TmkResult<()> { + crate::arch::interrupt::init(); + Ok(()) + } +} + + + +impl MsrPlatformTrait for HvTestCtx { + fn read_msr(&mut self, msr: u32) -> TmkResult { + let r = unsafe { read_msr(msr) }; + Ok(r) + } + + fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()> { + unsafe { write_msr(msr, value) }; + Ok(()) + } +} + + + +impl VirtualProcessorPlatformTrait for HvTestCtx { + fn get_register(&mut self, reg: u32) -> TmkResult { + #[cfg(target_arch = "x86_64")] + { + use hvdef::HvX64RegisterName; + let reg = HvX64RegisterName(reg); + let val = self.hvcall.get_register(reg.into(), None)?.as_u128(); + Ok(val) + } + + #[cfg(target_arch = "aarch64")] + { + use hvdef::HvAarch64RegisterName; + let reg = HvAarch64RegisterName(reg); + let val = self.hvcall.get_register(reg.into(), None)?.as_u128(); + Ok(val) + } + + #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] + { + Err(TmkError(TmkErrorType::NotImplemented)) + } + } + + fn get_vp_count(&self) -> TmkResult { + #[cfg(target_arch = 
"x86_64")] + { + let mut result: u32; + unsafe { + asm!( + "push rbx", + "cpuid", + "mov {result:r}, rbx", + "pop rbx", + in("eax") 1u32, + out("ecx") _, + out("edx") _, + result = out(reg) result, + options(nomem, nostack) + ); + } + Ok((result >> 16) & 0xFF) + } + + #[cfg(not(target_arch = "x86_64"))] + { + Err(TmkError(TmkErrorType::NotImplemented)) + } + } + + fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> { + let (vp_index, vtl, cmd) = cmd.get(); + let cmd = cmd.ok_or_else(|| TmkError(TmkErrorType::QueueCommandFailed))?; + cmdt() + .lock() + .get_mut(&vp_index) + .unwrap() + .push_back((cmd, vtl)); + Ok(()) + } + fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> { let (vp_index, vtl, cmd) = cmd.get(); let cmd = cmd.ok_or_else(|| TmkError(TmkErrorType::InvalidParameter))?; if vtl >= Vtl::Vtl2 { - panic!("error: can't run on vtl2"); + return Err(TmkError(TmkErrorType::InvalidParameter)); } + let is_vp_running = self.vp_runing.get(&vp_index); if let Some(_running_vtl) = is_vp_running { log::debug!("both vtl0 and vtl1 are running for VP: {:?}", vp_index); } else { if vp_index == 0 { - let vp_context = self - .get_default_context()?; - self.hvcall - .enable_vp_vtl(0, Vtl::Vtl1, Some(vp_context))?; + let vp_context = self.get_default_context()?; + self.hvcall.enable_vp_vtl(0, Vtl::Vtl1, Some(vp_context))?; cmdt().lock().get_mut(&vp_index).unwrap().push_back(( Box::new(move |ctx| { @@ -142,27 +201,43 @@ impl TestCtxTrait for HvTestCtx { self.switch_to_high_vtl(); self.vp_runing.insert(vp_index); } else { + let (tx, rx) = sync_nostd::Channel::>::new().split(); cmdt().lock().get_mut(&self.my_vp_idx).unwrap().push_back(( Box::new(move |ctx| { - _ = ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1); - _ = ctx.start_running_vp_with_default_context(VpExecutor::new( + let r = ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1); + if r.is_err() { + let _ = tx.send(r); + return; + } + let r = ctx.start_running_vp_with_default_context(VpExecutor::new( vp_index, Vtl::Vtl0, )); + if r.is_err() { + let _ = tx.send(r); + return; + } + let _ = tx.send(Ok(())); ctx.switch_to_low_vtl(); }), Vtl::Vtl1, )); - self.switch_to_high_vtl(); + log::debug!("VP{} waiting for start confirmation for vp from VTL1: {}", self.my_vp_idx, vp_index); + let rx = rx.recv(); + if let Ok(r) = rx { + r?; + } self.vp_runing.insert(vp_index); } } + cmdt() .lock() .get_mut(&vp_index) .unwrap() .push_back((cmd, vtl)); + if vp_index == self.my_vp_idx && self.my_vtl != vtl { if vtl == Vtl::Vtl0 { self.switch_to_low_vtl(); @@ -173,89 +248,37 @@ impl TestCtxTrait for HvTestCtx { Ok(()) } - fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> { - let (vp_index, vtl, cmd) = cmd.get(); - let cmd = - cmd.ok_or_else(|| TmkError(TmkErrorType::QueueCommandFailed))?; - cmdt() - .lock() - .get_mut(&vp_index) - .unwrap() - .push_back((cmd, vtl)); - Ok(()) - } - - fn switch_to_high_vtl(&mut self) { - HvCall::vtl_call(); - } - - fn switch_to_low_vtl(&mut self) { - HvCall::vtl_return(); - } - - fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()> { - self.hvcall - .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl)?; - log::info!("enabled vtl protections for the partition."); - Ok(()) - } - fn setup_interrupt_handler(&mut self) -> TmkResult<()> { - crate::arch::interrupt::init(); - Ok(()) - } - - fn setup_vtl_protection(&mut self) -> TmkResult<()> { + fn start_running_vp_with_default_context( + &mut self, + cmd: VpExecutor, + ) -> TmkResult<()> { + let (vp_index, vtl, _cmd) = 
cmd.get(); + let vp_ctx = self.get_default_context()?; self.hvcall - .enable_vtl_protection(HvInputVtl::CURRENT_VTL)?; - - log::info!("enabled vtl protections for the partition."); + .start_virtual_processor(vp_index, vtl, Some(vp_ctx))?; Ok(()) } - fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()> { - let layout = Layout::from_size_align(4096, ALIGNMENT).or_else(|_| Err(TmkError(TmkErrorType::AllocationFailed)))?; - - let ptr = unsafe { ALLOCATOR.alloc(layout) }; - let gpn = (ptr as u64) >> 12; - let reg = (gpn << 12) | 0x1; - - unsafe { write_msr(hvdef::HV_X64_MSR_SIMP, reg.into()) }; - log::info!("Successfuly set the SIMP register."); - - let reg = unsafe { read_msr(hvdef::HV_X64_MSR_SINT0) }; - let mut reg: hvdef::HvSynicSint = reg.into(); - reg.set_vector(interrupt_idx); - reg.set_masked(false); - reg.set_auto_eoi(true); - - self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into())?; - log::info!("Successfuly set the SINT0 register."); - Ok(()) + fn get_current_vp(&self) -> TmkResult { + Ok(self.my_vp_idx) } +} +impl VtlPlatformTrait for HvTestCtx { fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()> { self.hvcall .apply_vtl_protections(MemoryRange::new(range), vtl)?; Ok(()) } - fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()> { - unsafe { write_msr(msr, value) }; + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { + let vp_ctx = self.get_default_context()?; + self.hvcall.enable_vp_vtl(vp_index, vtl, Some(vp_ctx))?; Ok(()) } - fn read_msr(&mut self, msr: u32) -> TmkResult { - let r = unsafe { read_msr(msr) }; - Ok(r) - } - - fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) -> TmkResult<()> { - let (vp_index, vtl, _cmd) = cmd.get(); - let vp_ctx = self - .get_default_context()?; - self.hvcall - .start_virtual_processor(vp_index, vtl, Some(vp_ctx))?; - Ok(()) + fn get_current_vtl(&self) -> TmkResult { + Ok(self.my_vtl) } fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { @@ -264,88 +287,40 @@ impl TestCtxTrait for HvTestCtx { Vtl::Vtl1 => 1, Vtl::Vtl2 => 2, }; - let vp_context = self - .get_default_context()?; - self.hvcall - .set_vp_registers( - vp_index, - Some( - HvInputVtl::new() - .with_target_vtl_value(i) - .with_use_target_vtl(true), - ), - Some(vp_context), - )?; + let vp_context = self.get_default_context()?; + self.hvcall.set_vp_registers( + vp_index, + Some( + HvInputVtl::new() + .with_target_vtl_value(i) + .with_use_target_vtl(true), + ), + Some(vp_context), + )?; Ok(()) } - fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { - let vp_ctx = self - .get_default_context()?; + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()> { self.hvcall - .enable_vp_vtl(vp_index, vtl, Some(vp_ctx))?; + .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl)?; + log::info!("enabled vtl protections for the partition."); Ok(()) } - #[cfg(target_arch = "x86_64")] - fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()> { - crate::arch::interrupt::set_handler(interrupt_idx, handler); + fn setup_vtl_protection(&mut self) -> TmkResult<()> { + self.hvcall.enable_vtl_protection(HvInputVtl::CURRENT_VTL)?; + log::info!("enabled vtl protections for the partition."); Ok(()) } - #[cfg(target_arch = "x86_64")] - fn get_vp_count(&self) -> u32 { - let mut result: u32; - unsafe { - // Call CPUID with EAX=1, but work around the rbx constraint - asm!( - "push rbx", // Save rbx 
- "cpuid", // Execute CPUID - "mov {result:r}, rbx", // Store ebx to our result variable - "pop rbx", // Restore rbx - in("eax") 1u32, // Input: CPUID leaf 1 - out("ecx") _, // Output registers (not used) - out("edx") _, // Output registers (not used) - result = out(reg) result, // Output: result from ebx - options(nomem, nostack) - ); - } - - // Extract logical processor count from bits [23:16] - (result >> 16) & 0xFF - } - - #[cfg(target_arch = "x86_64")] - fn get_register(&mut self, reg: u32) -> TmkResult { - use hvdef::HvX64RegisterName; - - let reg = HvX64RegisterName(reg); - let val = self.hvcall - .get_register(reg.into(), None)? - .as_u128(); - Ok(val) - } - - #[cfg(target_arch = "aarch64")] - fn get_register(&mut self, reg: u32) -> TmkResult { - use hvdef::HvAarch64RegisterName; - - let reg = HvAarch64RegisterName(reg); - let val = self.hvcall - .get_register(reg.into(), None)? - .as_u128(); - Ok(val) - } - - fn get_current_vp(&self) -> TmkResult { - Ok(self.my_vp_idx) + fn switch_to_high_vtl(&mut self) { + HvCall::vtl_call(); } - fn get_current_vtl(&self) -> TmkResult { - Ok(self.my_vtl) + fn switch_to_low_vtl(&mut self) { + HvCall::vtl_return(); } } - impl HvTestCtx { pub const fn new() -> Self { HvTestCtx { @@ -356,18 +331,19 @@ impl HvTestCtx { } } - pub fn init(&mut self) { + pub fn init(&mut self) -> TmkResult<()> { self.hvcall.initialize(); - let vp_count = self.get_vp_count(); + let vp_count = self.get_vp_count()?; for i in 0..vp_count { register_command_queue(i); } self.my_vtl = self.hvcall.vtl(); + Ok(()) } fn exec_handler() { let mut ctx = HvTestCtx::new(); - ctx.init(); + ctx.init().expect("error: failed to init on a VP"); let reg = ctx .hvcall .get_register(hvdef::HvAllArchRegisterName::VpIndex.into(), None) @@ -472,13 +448,21 @@ impl From for TmkError { hvdef::HvError::NoResources => TmkErrorType::NoResources, hvdef::HvError::FeatureUnavailable => TmkErrorType::FeatureUnavailable, hvdef::HvError::PartialPacket => TmkErrorType::PartialPacket, - hvdef::HvError::ProcessorFeatureNotSupported => TmkErrorType::ProcessorFeatureNotSupported, - hvdef::HvError::ProcessorCacheLineFlushSizeIncompatible => TmkErrorType::ProcessorCacheLineFlushSizeIncompatible, + hvdef::HvError::ProcessorFeatureNotSupported => { + TmkErrorType::ProcessorFeatureNotSupported + } + hvdef::HvError::ProcessorCacheLineFlushSizeIncompatible => { + TmkErrorType::ProcessorCacheLineFlushSizeIncompatible + } hvdef::HvError::InsufficientBuffer => TmkErrorType::InsufficientBuffer, hvdef::HvError::IncompatibleProcessor => TmkErrorType::IncompatibleProcessor, hvdef::HvError::InsufficientDeviceDomains => TmkErrorType::InsufficientDeviceDomains, - hvdef::HvError::CpuidFeatureValidationError => TmkErrorType::CpuidFeatureValidationError, - hvdef::HvError::CpuidXsaveFeatureValidationError => TmkErrorType::CpuidXsaveFeatureValidationError, + hvdef::HvError::CpuidFeatureValidationError => { + TmkErrorType::CpuidFeatureValidationError + } + hvdef::HvError::CpuidXsaveFeatureValidationError => { + TmkErrorType::CpuidXsaveFeatureValidationError + } hvdef::HvError::ProcessorStartupTimeout => TmkErrorType::ProcessorStartupTimeout, hvdef::HvError::SmxEnabled => TmkErrorType::SmxEnabled, hvdef::HvError::InvalidLpIndex => TmkErrorType::InvalidLpIndex, @@ -494,7 +478,9 @@ impl From for TmkError { hvdef::HvError::InvalidCpuGroupId => TmkErrorType::InvalidCpuGroupId, hvdef::HvError::InvalidCpuGroupState => TmkErrorType::InvalidCpuGroupState, hvdef::HvError::OperationFailed => TmkErrorType::OperationFailed, - 
hvdef::HvError::NotAllowedWithNestedVirtActive => TmkErrorType::NotAllowedWithNestedVirtActive, + hvdef::HvError::NotAllowedWithNestedVirtActive => { + TmkErrorType::NotAllowedWithNestedVirtActive + } hvdef::HvError::InsufficientRootMemory => TmkErrorType::InsufficientRootMemory, hvdef::HvError::EventBufferAlreadyFreed => TmkErrorType::EventBufferAlreadyFreed, hvdef::HvError::Timeout => TmkErrorType::Timeout, @@ -509,7 +495,11 @@ impl From for TmkError { TmkErrorType::OperationFailed // Generic fallback } }; - log::debug!("Mapped hvdef::HvError::{:?} to TmkErrorType::{:?}", e, tmk_error_type); + log::debug!( + "Mapped hvdef::HvError::{:?} to TmkErrorType::{:?}", + e, + tmk_error_type + ); TmkError(tmk_error_type) } -} \ No newline at end of file +} diff --git a/opentmk/opentmk/src/uefi/rt.rs b/opentmk/opentmk/src/uefi/rt.rs index d8a80b4b53..46cfc431d4 100644 --- a/opentmk/opentmk/src/uefi/rt.rs +++ b/opentmk/opentmk/src/uefi/rt.rs @@ -10,8 +10,6 @@ use core::arch::asm; #[panic_handler] fn panic_handler(panic: &core::panic::PanicInfo<'_>) -> ! { log::error!("Panic at runtime: {}", panic); - unsafe { - asm!("int 8H"); - } + log::warn!("TEST_END"); loop {} } diff --git a/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs b/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs new file mode 100644 index 0000000000..bda77c6322 --- /dev/null +++ b/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs @@ -0,0 +1,48 @@ +use hvdef::Vtl; +use sync_nostd::Channel; + +use crate::{context::{VirtualProcessorPlatformTrait, VpExecutor, VtlPlatformTrait}, tmk_assert, tmkdefs::TmkErrorType}; + + +pub fn exec(ctx: &mut T) +where + T: VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + + // Skiping VTL setup for now to test the negitive case + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 8, "vp count should be 8"); + + // Testing BSP VTL1 Bringup + { + let (tx, _rx) = Channel::new().split(); + + let result = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == 0, "vp should be equal to 0"); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); + tx.send(()) + .expect("Failed to send message through the channel"); + ctx.switch_to_low_vtl(); + })); + + tmk_assert!(result.is_err(), "start_on_vp should fail"); + tmk_assert!(result.unwrap_err() == crate::tmkdefs::TmkErrorType::InvalidVtlState.into(), "start_on_vp should fail with InvalidVtlState"); + log::info!("result on start_on_vp: {:?}", result); + } +} diff --git a/opentmk/opentmk/src/uefi/tests/hv_misc.rs b/opentmk/opentmk/src/uefi/tests/hv_misc.rs index 681d8baa3b..0621155ebd 100644 --- a/opentmk/opentmk/src/uefi/tests/hv_misc.rs +++ b/opentmk/opentmk/src/uefi/tests/hv_misc.rs @@ -1,19 +1,21 @@ #![allow(warnings)] +use crate::context::{ + InterruptPlatformTrait, SecureInterceptPlatformTrait, VirtualProcessorPlatformTrait, + VtlPlatformTrait, +}; // WIP : This test is not yet complete and is not expected to pass. // // This test is to verify that the VTL protections are working as expected. // The stack values in VTL0 are changing after interrupt handling in VTL1. 
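// Not part of the diff above: a minimal sketch of the error-matching pattern that the new
// hv_error_vp_start.rs test relies on. It assumes the `TmkError`/`TmkErrorType` pair from
// `tmkdefs` (which, per the test, supports `PartialEq`, `Debug` and `From<TmkErrorType>`);
// the helper name `assert_fails_with` is illustrative only, the real tests call the
// `tmk_assert!` macro directly.
fn assert_fails_with<T>(result: Result<T, TmkError>, expected: TmkErrorType, what: &str) {
    match result {
        Ok(_) => panic!("{what}: expected failure, but the call succeeded"),
        Err(e) => {
            // `TmkErrorType` converts into `TmkError` via `From`, mirroring the
            // `result.unwrap_err() == TmkErrorType::InvalidVtlState.into()` check above.
            let expected: TmkError = expected.into();
            assert!(e == expected, "{what}: got {e:?}, expected {expected:?}");
        }
    }
}
// Usage in the spirit of hv_error_vp_start.rs: VTL1 was never enabled for the partition,
// so bringing up VTL1 on the BSP is expected to be rejected with `InvalidVtlState`:
//   let r = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(|_ctx: &mut T| {}));
//   assert_fails_with(r, TmkErrorType::InvalidVtlState, "start_on_vp without VTL1 enabled");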
+use crate::tmk_assert; use crate::tmk_assert::{AssertOption, AssertResult}; use crate::tmkdefs::TmkResult; -use sync_nostd::{Channel, Receiver, Sender}; use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB}; use crate::{context, uefi::hypvctx}; -use crate::{tmk_assert}; use ::alloc::boxed::Box; -use alloc::sync::Arc; use ::alloc::vec::Vec; -use context::{TestCtxTrait, VpExecutor}; -use hypvctx::HvTestCtx; +use alloc::sync::Arc; +use context::VpExecutor; use core::alloc::{GlobalAlloc, Layout}; use core::arch::asm; use core::cell::RefCell; @@ -21,16 +23,26 @@ use core::ops::Range; use core::sync::atomic::{AtomicI32, Ordering}; use hvdef::hypercall::HvInputVtl; use hvdef::{HvAllArchRegisterName, HvRegisterVsmVpStatus, HvX64RegisterName, Vtl}; +use hypvctx::HvTestCtx; +use sync_nostd::{Channel, Receiver, Sender}; use uefi::entry; use uefi::Status; static mut HEAPX: RefCell<*mut u8> = RefCell::new(0 as *mut u8); static mut CON: AtomicI32 = AtomicI32::new(0); -pub fn exec(ctx: &mut T) where T: TestCtxTrait { +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ log::info!("ctx ptr: {:p}", &ctx as *const _); - let mut vp_count = ctx.get_vp_count(); + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); tmk_assert!(vp_count == 8, "vp count should be 8"); ctx.setup_interrupt_handler(); @@ -39,54 +51,51 @@ pub fn exec(ctx: &mut T) where T: TestCtxTrait { ctx.setup_partition_vtl(Vtl::Vtl1); - ctx.start_on_vp( - VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T|{ - log::info!("successfully started running VTL1 on vp0."); - ctx.setup_secure_intercept(0x30); - ctx.set_interrupt_idx(0x30, || { - log::info!("interrupt fired!"); + ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + ctx.setup_secure_intercept(0x30); + ctx.set_interrupt_idx(0x30, || { + log::info!("interrupt fired!"); + + let mut hv_test_ctx = HvTestCtx::new(); + hv_test_ctx.init(); - let mut hv_test_ctx = HvTestCtx::new(); - hv_test_ctx.init(); + let c = hv_test_ctx.get_register(HvAllArchRegisterName::VsmVpStatus.0); + tmk_assert!(c.is_ok(), "read should succeed"); - let c = hv_test_ctx.get_register(HvAllArchRegisterName::VsmVpStatus.0); - tmk_assert!(c.is_ok(), "read should succeed"); - - let c = c.unwrap(); - let cp = HvRegisterVsmVpStatus::from_bits(c as u64); + let c = c.unwrap(); + let cp = HvRegisterVsmVpStatus::from_bits(c as u64); - log::info!("VSM VP Status: {:?}", cp); + log::info!("VSM VP Status: {:?}", cp); - log::info!("interrupt handled!"); - }); + log::info!("interrupt handled!"); + }); - let layout = - Layout::from_size_align(SIZE_1MB, 4096).expect("msg: failed to create layout"); - let ptr = unsafe { ALLOCATOR.alloc(layout) }; - log::info!("allocated some memory in the heap from vtl1"); - unsafe { - let mut z = HEAPX.borrow_mut(); - *z = ptr; - *ptr.add(10) = 0xAA; - } + let layout = Layout::from_size_align(SIZE_1MB, 4096).expect("msg: failed to create layout"); + let ptr = unsafe { ALLOCATOR.alloc(layout) }; + log::info!("allocated some memory in the heap from vtl1"); + unsafe { + let mut z = HEAPX.borrow_mut(); + *z = ptr; + *ptr.add(10) = 0xAA; + } - let size = layout.size(); - ctx.setup_vtl_protection(); + let size = layout.size(); + ctx.setup_vtl_protection(); - log::info!("enabled vtl protections for the partition."); + log::info!("enabled vtl protections for 
the partition."); - let range = Range { - start: ptr as u64, - end: ptr as u64 + size as u64, - }; + let range = Range { + start: ptr as u64, + end: ptr as u64 + size as u64, + }; - ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); + ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); - log::info!("moving to vtl0 to attempt to read the heap memory"); + log::info!("moving to vtl0 to attempt to read the heap memory"); - ctx.switch_to_low_vtl(); - }), - ); + ctx.switch_to_low_vtl(); + })); ctx.queue_command_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { log::info!("successfully started running VTL1 on vp0."); @@ -126,15 +135,13 @@ pub fn exec(ctx: &mut T) where T: TestCtxTrait { // } log::info!("ctx ptr: {:p}", &ctx as *const _); let c = ctx.get_vp_count(); - + tmk_assert!(c.is_ok(), "get_vp_count should succeed"); + let c = c.unwrap(); tmk_assert!(c == 8, "vp count should be 8"); // rx.recv(); log::info!("we are in vtl0 now!"); log::info!("we reached the end of the test"); - loop { - - } - -} \ No newline at end of file + loop {} +} diff --git a/opentmk/opentmk/src/uefi/tests/hv_processor.rs b/opentmk/opentmk/src/uefi/tests/hv_processor.rs index 7ef48d0f1f..a7e12f89a5 100644 --- a/opentmk/opentmk/src/uefi/tests/hv_processor.rs +++ b/opentmk/opentmk/src/uefi/tests/hv_processor.rs @@ -2,41 +2,48 @@ use hvdef::Vtl; use sync_nostd::Channel; use crate::{ - tmk_assert, context::{TestCtxTrait, VpExecutor} + context::{ + VirtualProcessorPlatformTrait, + VpExecutor, VtlPlatformTrait, + }, + tmk_assert, }; -pub fn exec(ctx: &mut T) where T: TestCtxTrait { - let r = ctx.setup_interrupt_handler(); - tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); - +pub fn exec(ctx: &mut T) +where + T: VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ let r = ctx.setup_partition_vtl(Vtl::Vtl1); tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); - + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + + let vp_count = vp_count.unwrap(); tmk_assert!(vp_count == 8, "vp count should be 8"); // Testing BSP VTL Bringup { let (tx, rx) = Channel::new().split(); - let result = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command( - move |ctx: &mut T| { - let vp = ctx.get_current_vp(); - tmk_assert!(vp.is_ok(), "vp should be valid"); - - let vp = vp.unwrap(); - log::info!("vp: {}", vp); - tmk_assert!(vp == 0, "vp should be equal to 0"); - - let vtl = ctx.get_current_vtl(); - tmk_assert!(vtl.is_ok(), "vtl should be valid"); - - let vtl = vtl.unwrap(); - log::info!("vtl: {:?}", vtl); - tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); - tx.send(()).expect("Failed to send message through the channel"); - ctx.switch_to_low_vtl(); - }, - )); + let result = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == 0, "vp should be equal to 0"); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); + tx.send(()) + .expect("Failed to send message through the channel"); + ctx.switch_to_low_vtl(); + })); tmk_assert!(result.is_ok(), "start_on_vp should succeed"); _ = rx.recv(); } @@ -45,11 +52,11 @@ pub fn exec(ctx: &mut T) where T: TestCtxTrait { // Testing VTL1 { let (tx, rx) = 
Channel::new().split(); - let result = ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command( - move |ctx: &mut T| { + let result = + ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command(move |ctx: &mut T| { let vp = ctx.get_current_vp(); tmk_assert!(vp.is_ok(), "vp should be valid"); - + let vp = vp.unwrap(); log::info!("vp: {}", vp); tmk_assert!(vp == i, format!("vp should be equal to {}", i)); @@ -61,8 +68,7 @@ pub fn exec(ctx: &mut T) where T: TestCtxTrait { log::info!("vtl: {:?}", vtl); tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl1 for VP {}", i)); _ = tx.send(()); - }, - )); + })); tmk_assert!(result.is_ok(), "start_on_vp should succeed"); _ = rx.recv(); } @@ -70,8 +76,8 @@ pub fn exec(ctx: &mut T) where T: TestCtxTrait { // Testing VTL0 { let (tx, rx) = Channel::new().split(); - let result = ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command( - move |ctx: &mut T| { + let result = + ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command(move |ctx: &mut T| { let vp = ctx.get_current_vp(); tmk_assert!(vp.is_ok(), "vp should be valid"); @@ -86,8 +92,7 @@ pub fn exec(ctx: &mut T) where T: TestCtxTrait { log::info!("vtl: {:?}", vtl); tmk_assert!(vtl == Vtl::Vtl0, format!("vtl should be Vtl0 for VP {}", i)); _ = tx.send(()); - }, - )); + })); tmk_assert!(result.is_ok(), "start_on_vp should succeed"); _ = rx.recv(); } diff --git a/opentmk/opentmk/src/uefi/tests/mod.rs b/opentmk/opentmk/src/uefi/tests/mod.rs index 665873ac3d..6ba7df3c84 100644 --- a/opentmk/opentmk/src/uefi/tests/mod.rs +++ b/opentmk/opentmk/src/uefi/tests/mod.rs @@ -2,9 +2,11 @@ use super::hypvctx::HvTestCtx; pub mod hv_processor; pub mod hv_misc; +mod hv_error_vp_start; + pub fn run_test() { let mut ctx = HvTestCtx::new(); - ctx.init(); - hv_processor::exec(&mut ctx); + ctx.init().expect("failed to init on BSP"); + hv_error_vp_start::exec(&mut ctx); } \ No newline at end of file From bb2227037cfdcd142bdaba7d08638552d150c21b Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Thu, 22 May 2025 13:11:09 +0000 Subject: [PATCH 10/10] docs: add documentation for structs --- opentmk/opentmk/Cargo.toml | 3 +- .../arch/x86_64/interrupt_handler_register.rs | 18 ++- opentmk/opentmk/src/context.rs | 65 +++++++- opentmk/opentmk/src/hypercall.rs | 30 ++-- opentmk/opentmk/src/main.rs | 1 + opentmk/opentmk/src/tmk_assert.rs | 142 +++++++----------- opentmk/opentmk/src/tmk_logger.rs | 39 +++-- opentmk/opentmk/src/uefi/hypvctx.rs | 69 +++++++-- .../src/uefi/tests/hv_error_vp_start.rs | 3 +- opentmk/opentmk/src/uefi/tests/hv_misc.rs | 16 +- opentmk/opentmk/src/uefi/tests/mod.rs | 7 +- 11 files changed, 238 insertions(+), 155 deletions(-) diff --git a/opentmk/opentmk/Cargo.toml b/opentmk/opentmk/Cargo.toml index 27eeee0c6c..64926f8e7c 100644 --- a/opentmk/opentmk/Cargo.toml +++ b/opentmk/opentmk/Cargo.toml @@ -16,7 +16,7 @@ linked_list_allocator.workspace = true log.workspace = true memory_range.workspace = true minimal_rt.workspace = true -serde = { version = "1.0", default-features = false} +serde = { version = "1.0", default-features = false, features = ["derive"]} serde_json = { version = "1.0", default-features = false, features = ["alloc"] } uefi = { workspace = true, features = ["alloc"] } x86_64.workspace = true @@ -33,3 +33,4 @@ minimal_rt_build.workspace = true [profile.release] debug = false strip = "debuginfo" + diff --git a/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs index e6084ec4ce..a2b799b18b 100644 --- 
a/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs +++ b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs @@ -2,14 +2,20 @@ use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode}; use sync_nostd::Mutex; - static mut COMMON_HANDLER: fn(InterruptStackFrame, u8) = common_handler; static COMMON_HANDLER_MUTEX: Mutex<()> = Mutex::new(()); + +#[unsafe(no_mangle)] +fn abstraction_handle(stack_frame: InterruptStackFrame, interrupt: u8) { + unsafe { (COMMON_HANDLER)(stack_frame, interrupt) }; + log::debug!("Interrupt: {}", interrupt); +} + macro_rules! create_fn { ($name:ident, $i: expr) => { extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) { - unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + abstraction_handle(stack_frame, $i); } }; } @@ -17,7 +23,7 @@ macro_rules! create_fn { macro_rules! create_fn_create_with_errorcode { ($name:ident, $i: expr) => { extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) { - unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + abstraction_handle(stack_frame, $i); } }; } @@ -25,7 +31,7 @@ macro_rules! create_fn_create_with_errorcode { macro_rules! create_fn_divergent_create_with_errorcode { ($name:ident, $i: expr) => { extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) -> ! { - unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + abstraction_handle(stack_frame, $i); loop{} } }; @@ -34,7 +40,7 @@ macro_rules! create_fn_divergent_create_with_errorcode { macro_rules! create_fn_divergent_create { ($name:ident, $i: expr) => { extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) -> ! { - unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + abstraction_handle(stack_frame, $i); loop{} } }; @@ -43,7 +49,7 @@ macro_rules! create_fn_divergent_create { macro_rules! create_page_fault_fn { ($name:ident, $i: expr) => { extern "x86-interrupt" fn $name(stack_frame:InterruptStackFrame, _error_code: PageFaultErrorCode) { - unsafe { (COMMON_HANDLER)(stack_frame, $i) }; + abstraction_handle(stack_frame, $i); } }; } diff --git a/opentmk/opentmk/src/context.rs b/opentmk/opentmk/src/context.rs index 304907f221..0962b81a99 100644 --- a/opentmk/opentmk/src/context.rs +++ b/opentmk/opentmk/src/context.rs @@ -6,38 +6,91 @@ use hvdef::Vtl; use crate::tmkdefs::TmkResult; - pub trait SecureInterceptPlatformTrait { + /// Installs a secure-world intercept for the given interrupt. + /// + /// The platform must arrange that the supplied `interrupt_idx` + /// triggers a VM-exit or any other mechanism that transfers control + /// to the TMK secure handler. + /// + /// Returns `Ok(())` on success or an error wrapped in `TmkResult`. fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()>; } pub trait InterruptPlatformTrait { + /// Associates an interrupt vector with a handler inside the + /// non-secure world. + /// + /// * `interrupt_idx` – IDT/GIC index to program + /// * `handler` – Function that will be executed when the interrupt + /// fires. fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()>; + + /// Finalises platform specific interrupt setup (enables the table, + /// unmasks lines, etc.). fn setup_interrupt_handler(&mut self) -> TmkResult<()>; } pub trait MsrPlatformTrait { + /// Reads the content of `msr`. + /// + /// Returns the 64-bit value currently stored in that MSR. fn read_msr(&mut self, msr: u32) -> TmkResult; + + /// Writes `value` into `msr`. 
fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()>; } -pub trait VirtualProcessorPlatformTrait where T: VtlPlatformTrait { +pub trait VirtualProcessorPlatformTrait +where + T: VtlPlatformTrait, +{ + /// Returns the index of the virtual CPU currently executing this + /// code. fn get_current_vp(&self) -> TmkResult; + + /// Reads the architecture specific register identified by `reg`. fn get_register(&mut self, reg: u32) -> TmkResult; + + /// Total number of online VPs in the partition. fn get_vp_count(&self) -> TmkResult; + + /// Queues `cmd` to run later on the VP described inside the + /// `VpExecutor`. fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; + + /// Synchronously executes `cmd` on its target VP. fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; + + /// Starts the target VP (if required) and executes `cmd` with a + /// platform provided default VTL context. fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) -> TmkResult<()>; } pub trait VtlPlatformTrait { + /// Applies VTL protection to the supplied physical address range. fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()>; - fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + + /// Enables the given `vtl` on `vp_index` with a default context. + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + + /// Returns the VTL level the caller is currently executing in. fn get_current_vtl(&self) -> TmkResult; + + /// Sets the default VTL context on `vp_index`. fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + + /// Performs partition wide initialisation for a given `vtl`. fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()>; + + /// Platform specific global VTL preparation (stage 2 translation, + /// EPT, etc.). fn setup_vtl_protection(&mut self) -> TmkResult<()>; + + /// Switches the current hardware thread to the higher privileged VTL. fn switch_to_high_vtl(&mut self); + + /// Switches the current hardware thread back to the lower privileged VTL. fn switch_to_low_vtl(&mut self); } @@ -51,6 +104,7 @@ pub struct VpExecutor { } impl VpExecutor { + /// Creates a new executor targeting `vp_index` running in `vtl`. pub fn new(vp_index: u32, vtl: Vtl) -> Self { VpExecutor { vp_index, @@ -59,11 +113,16 @@ impl VpExecutor { } } + /// Stores a closure `cmd` that will be executed on the target VP. + /// + /// The closure receives a mutable reference to the platform-specific + /// type `T` that implements `VtlPlatformTrait`. pub fn command(mut self, cmd: impl FnOnce(&mut T) + 'static) -> Self { self.cmd = Some(Box::new(cmd)); self } + /// Extracts the tuple `(vp_index, vtl, cmd)` consuming `self`. pub fn get(mut self) -> (u32, Vtl, Option>) { let cmd = self.cmd.take(); (self.vp_index, self.vtl, cmd) diff --git a/opentmk/opentmk/src/hypercall.rs b/opentmk/opentmk/src/hypercall.rs index 9129635272..07cc09b4b2 100644 --- a/opentmk/opentmk/src/hypercall.rs +++ b/opentmk/opentmk/src/hypercall.rs @@ -6,6 +6,8 @@ #![allow(dead_code)] use arrayvec::ArrayVec; use core::mem::size_of; +use core::sync::atomic::AtomicBool; +use core::sync::atomic::Ordering; use hvdef::hypercall::EnablePartitionVtlFlags; use hvdef::hypercall::HvInputVtl; use hvdef::hypercall::InitialVpContextX64; @@ -114,11 +116,12 @@ impl HvcallPage { /// page, and the output page, so this should not be used in any /// multi-threaded capacity (which the boot shim currently is not). 
pub struct HvCall { - initialized: bool, input_page: HvcallPage, output_page: HvcallPage, } +static HV_PAGE_INIT_STATUS: AtomicBool = AtomicBool::new(false); + #[expect(unsafe_code)] impl HvCall { /// Hypercall to accept vtl2 pages from address start to end with VTL 2 @@ -257,8 +260,6 @@ impl HvCall { code: hvdef::HypercallCode, rep_count: Option, ) -> hvdef::hypercall::HypercallOutput { - self.init_if_needed(); - let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() .with_code(code.0) .with_rep_count(rep_count.unwrap_or_default()); @@ -501,21 +502,17 @@ impl HvCall { Ok(()) } - /// Initializes the hypercall interface if it hasn't been already. - fn init_if_needed(&mut self) { - if !self.initialized { - self.initialize(); - } - } - /// Initializes the hypercall interface. pub fn initialize(&mut self) { - assert!(!self.initialized); - + let init = HV_PAGE_INIT_STATUS.load(Ordering::SeqCst); + if init { + return; + } // TODO: revisit os id value. For now, use 1 (which is what UEFI does) let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1); crate::arch::hypercall::initialize(guest_os_id.into()); - self.initialized = true; + + HV_PAGE_INIT_STATUS.swap(true, Ordering::SeqCst); } /// Returns a mutable reference to the hypercall input page. @@ -526,7 +523,6 @@ impl HvCall { /// Creates a new `HvCall` instance. pub const fn new() -> Self { HvCall { - initialized: false, input_page: HvcallPage::new(), output_page: HvcallPage::new(), } @@ -707,15 +703,15 @@ impl HvCall { /// Call before jumping to kernel. pub fn uninitialize(&mut self) { - if self.initialized { + let init = HV_PAGE_INIT_STATUS.load(Ordering::SeqCst); + if init { crate::arch::hypercall::uninitialize(); - self.initialized = false; + HV_PAGE_INIT_STATUS.swap(false, Ordering::SeqCst); } } /// Returns the environment's VTL. 
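// Not part of the patch: a minimal sketch of the one-shot initialization idea behind the
// new `HV_PAGE_INIT_STATUS` flag in `HvCall::initialize`. The names `INIT_DONE` and
// `try_init` are illustrative only; `compare_exchange` is shown so that exactly one caller
// performs the setup even if several callers race, whereas the patch uses `load` + `swap`,
// which is adequate for the single-threaded boot flow described in the `HvCall` docs.
use core::sync::atomic::{AtomicBool, Ordering};

static INIT_DONE: AtomicBool = AtomicBool::new(false);

fn try_init(do_setup: impl FnOnce()) {
    // Only the caller that flips false -> true runs the setup closure; later calls are no-ops.
    if INIT_DONE
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_ok()
    {
        do_setup();
    }
}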
pub fn vtl(&mut self) -> Vtl { - assert!(self.initialized); self .get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None) .map_or(Vtl::Vtl0, |status| { diff --git a/opentmk/opentmk/src/main.rs b/opentmk/opentmk/src/main.rs index f8ffeb4ab8..dc331ea99d 100644 --- a/opentmk/opentmk/src/main.rs +++ b/opentmk/opentmk/src/main.rs @@ -3,6 +3,7 @@ #![no_std] #![allow(unsafe_code)] #![feature(abi_x86_interrupt)] +#![feature(naked_functions)] #![doc = include_str!("../README.md")] diff --git a/opentmk/opentmk/src/tmk_assert.rs b/opentmk/opentmk/src/tmk_assert.rs index 232ee7acb1..d4eb9f4433 100644 --- a/opentmk/opentmk/src/tmk_assert.rs +++ b/opentmk/opentmk/src/tmk_assert.rs @@ -1,7 +1,43 @@ -use core::fmt::Write; use alloc::string::{String, ToString}; +use core::fmt::Write; use serde::Serialize; -use serde_json::json; + +#[derive(Serialize)] +struct AssertJson<'a, T> +where + T: Serialize, +{ + #[serde(rename = "type")] + type_: &'a str, + level: &'a str, + message: &'a str, + line: String, + assertion_result: bool, + testname: &'a T, +} + +impl<'a, T> AssertJson<'a, T> +where + T: Serialize, +{ + fn new( + type_: &'a str, + level: &'a str, + message: &'a str, + line: String, + assertion_result: bool, + testname: &'a T, + ) -> Self { + Self { + type_, + level, + message, + line, + assertion_result, + testname, + } + } +} pub fn format_assert_json_string( s: &str, @@ -13,14 +49,9 @@ pub fn format_assert_json_string( where T: Serialize, { - let out = json!({ - "type:": "assert", - "level": "WARN", - "message": s, - "line": line, - "assertion_result": assert_result, - "testname": testname, - }); + let assert_json = AssertJson::new("assert", "WARN", s, line, assert_result, testname); + + let out = serde_json::to_string(&assert_json).expect("Failed to serialize assert JSON"); let mut out = out.to_string(); if terminate_new_line { out.push('\n'); @@ -28,88 +59,23 @@ where return out; } - pub fn write_str(s: &str) { let _ = crate::tmk_logger::LOGGER.get_writter().write_str(s); } #[macro_export] macro_rules! 
tmk_assert { - ($condition:expr, $message:expr) => { - { - let file = core::file!(); - let line = line!(); - let file_line = format!("{}:{}", file, line); - let expn = stringify!($condition); - let result: bool = $condition; - let js = crate::tmk_assert::format_assert_json_string( - &expn, true, file_line, result, &$message, - ); - crate::tmk_assert::write_str(&js); - if !result { - panic!("Assertion failed: {}", $message); - } + ($condition:expr, $message:expr) => {{ + let file = core::file!(); + let line = line!(); + let file_line = format!("{}:{}", file, line); + let expn = stringify!($condition); + let result: bool = $condition; + let js = + crate::tmk_assert::format_assert_json_string(&expn, true, file_line, result, &$message); + crate::tmk_assert::write_str(&js); + if !result { + panic!("Assertion failed: {}", $message); } - }; -} - -pub trait AssertResult { - fn unpack_assert(self) -> T; - fn expect_assert(self, message: &str) -> T; -} - -pub trait AssertOption { - fn expect_assert(self, message: &str) -> T; -} - -// impl AssertOption for Option { -// fn expect_assert(self, message: &str) -> T { -// match self { -// Some(value) => value, -// None => { -// let call: &core::panic::Location<'_> = core::panic::Location::caller(); -// let file_line = format!("{}:{}", call.file(), call.line()); -// let expn = type_name::>(); -// let js = format_assert_json_string(expn, true, file_line, false, &message); -// write_str(&js); -// panic!("Assertion failed: {}", message); -// } -// } -// } -// } - -// impl AssertResult for Result -// where -// E: core::fmt::Debug, -// { -// fn unpack_assert(self) -> T { -// match self { -// Ok(value) => value, -// Err(err) => { -// let call: &core::panic::Location<'_> = core::panic::Location::caller(); -// let file_line = format!("{}:{}", call.file(), call.line()); -// let expn = type_name::>(); -// let js = -// format_assert_json_string(expn, true, file_line, false, &"ResultTest"); -// write_str(&js); -// panic!("Assertion failed: {:?}", err); -// } -// } -// } -// fn expect_assert(self, message: &str) -> T { -// match self { -// Ok(value) => { -// log::info!("result is ok, condition not met for: {}", message); -// value -// } -// Err(err) => { -// let call: &core::panic::Location<'_> = core::panic::Location::caller(); -// let file_line = format!("{}:{}", call.file(), call.line()); -// let expn = type_name::>(); -// let js = format_assert_json_string(expn, true, file_line, false, &message); -// write_str(&js); -// panic!("Assertion failed: {:?}", err); -// } -// } -// } -// } + }}; +} \ No newline at end of file diff --git a/opentmk/opentmk/src/tmk_logger.rs b/opentmk/opentmk/src/tmk_logger.rs index f594f60b63..4c974103b4 100644 --- a/opentmk/opentmk/src/tmk_logger.rs +++ b/opentmk/opentmk/src/tmk_logger.rs @@ -1,11 +1,33 @@ use core::fmt::Write; -use alloc::{fmt::format, string::{String, ToString}}; +use alloc::{ + fmt::format, + string::{String, ToString}, +}; use log::SetLoggerError; -use serde_json::json; use sync_nostd::{Mutex, MutexGuard}; use crate::arch::serial::{InstrIoAccess, Serial}; +use serde::Serialize; + +#[derive(Serialize)] +struct LogEntry { + log_type: &'static str, + level: String, + message: String, + line: String, +} + +impl LogEntry { + fn new(level: log::Level, message: &String, line: &String) -> Self { + LogEntry { + log_type: "log", + level: level.as_str().to_string(), + message: message.clone(), + line: line.clone(), + } + } +} pub fn format_log_string_to_json( message: &String, @@ -13,12 +35,8 @@ pub fn format_log_string_to_json( 
     terminate_new_line: bool,
     level: log::Level,
 ) -> String {
-    let out = json!({
-        "type:": "log",
-        "level": level.as_str(),
-        "message": message,
-        "line": line,
-    });
+    let log_entry = LogEntry::new(level, message, line);
+    let out = serde_json::to_string(&log_entry).unwrap();
     let mut out = out.to_string();
     if terminate_new_line {
         out.push('\n');
@@ -40,7 +58,10 @@ where
         }
     }
 
-    pub fn get_writter(&self) -> MutexGuard<'_, T> where T: Write + Send {
+    pub fn get_writter(&self) -> MutexGuard<'_, T>
+    where
+        T: Write + Send,
+    {
         self.writter.lock()
     }
 }
diff --git a/opentmk/opentmk/src/uefi/hypvctx.rs b/opentmk/opentmk/src/uefi/hypvctx.rs
index 8ed85a2a3d..10139bb25d 100644
--- a/opentmk/opentmk/src/uefi/hypvctx.rs
+++ b/opentmk/opentmk/src/uefi/hypvctx.rs
@@ -47,14 +47,11 @@ pub struct HvTestCtx {
     pub my_vtl: Vtl,
 }
 
-impl Drop for HvTestCtx {
-    fn drop(&mut self) {
-        self.hvcall.uninitialize();
-    }
-}
-
-
 impl SecureInterceptPlatformTrait for HvTestCtx {
+    /// Configure the Synthetic Interrupt Message Page (SIMP) and the first
+    /// SynIC interrupt (SINT0) so that the hypervisor can deliver
+    /// hypervisor-side notifications back to the guest.
+    /// Returns an error if allocation of the SIMP buffer fails.
     fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()> {
         let layout = Layout::from_size_align(4096, ALIGNMENT)
             .or_else(|_| Err(TmkError(TmkErrorType::AllocationFailed)))?;
@@ -78,9 +75,10 @@ impl SecureInterceptPlatformTrait for HvTestCtx {
     }
 }
 
-
-
 impl InterruptPlatformTrait for HvTestCtx {
+    /// Install an interrupt handler for the supplied vector on x86-64.
+    /// For non-x86-64 targets the call returns
+    /// [`TmkErrorType::NotImplemented`].
     fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()> {
         #[cfg(target_arch = "x86_64")]
         {
@@ -94,29 +92,31 @@ impl InterruptPlatformTrait for HvTestCtx {
         }
     }
 
+    /// Initialise the minimal in-guest interrupt infrastructure
+    /// (IDT/GIC, etc., depending on the architecture).
     fn setup_interrupt_handler(&mut self) -> TmkResult<()> {
         crate::arch::interrupt::init();
         Ok(())
     }
 }
 
-
-
 impl MsrPlatformTrait for HvTestCtx {
+    /// Read an MSR directly from the CPU and return the raw value.
     fn read_msr(&mut self, msr: u32) -> TmkResult {
         let r = unsafe { read_msr(msr) };
         Ok(r)
     }
 
+    /// Write an MSR directly on the CPU.
     fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()> {
         unsafe { write_msr(msr, value) };
         Ok(())
     }
 }
 
-
-
 impl VirtualProcessorPlatformTrait for HvTestCtx {
+    /// Fetch the content of the specified architectural register from
+    /// the current VTL for the executing VP.
     fn get_register(&mut self, reg: u32) -> TmkResult {
         #[cfg(target_arch = "x86_64")]
         {
@@ -140,6 +140,8 @@ impl VirtualProcessorPlatformTrait for HvTestCtx {
         }
     }
 
+    /// Return the number of logical processors present in the machine,
+    /// obtained via `cpuid` leaf 1 on x86-64.
     fn get_vp_count(&self) -> TmkResult {
         #[cfg(target_arch = "x86_64")]
         {
@@ -166,6 +168,9 @@ impl VirtualProcessorPlatformTrait for HvTestCtx {
         }
     }
 
+    /// Push a command onto the per-VP linked list so it will be executed
+    /// by the busy-loop running in `exec_handler`. No scheduling happens
+    /// here; the command is simply enqueued.
     fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> {
         let (vp_index, vtl, cmd) = cmd.get();
         let cmd = cmd.ok_or_else(|| TmkError(TmkErrorType::QueueCommandFailed))?;
@@ -177,6 +182,15 @@ impl VirtualProcessorPlatformTrait for HvTestCtx {
         Ok(())
     }
 
+    /// Ensure the target VP is running in the requested VTL and queue
+    /// the command for execution.
+    /// - If the VP is not yet running, it is started with a default
+    ///   context.
+    /// - If the command targets a different VTL than the current one,
+    ///   control is switched via `vtl_call` / `vtl_return` so that the
+    ///   executor loop can pick the command up.
+    /// In short, every VP acts as an executor engine and spins in
+    /// `exec_handler` waiting for work.
     fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> {
         let (vp_index, vtl, cmd) = cmd.get();
         let cmd = cmd.ok_or_else(|| TmkError(TmkErrorType::InvalidParameter))?;
@@ -248,6 +262,8 @@ impl VirtualProcessorPlatformTrait for HvTestCtx {
         Ok(())
     }
 
+    /// Start the given VP in the current VTL using a freshly captured
+    /// context, without queuing any additional work.
     fn start_running_vp_with_default_context(
         &mut self,
         cmd: VpExecutor,
@@ -259,28 +275,35 @@ impl VirtualProcessorPlatformTrait for HvTestCtx {
         Ok(())
     }
 
+    /// Return the index of the VP that is currently executing this code.
     fn get_current_vp(&self) -> TmkResult {
         Ok(self.my_vp_idx)
     }
 }
 
 impl VtlPlatformTrait for HvTestCtx {
+    /// Apply VTL protections to the supplied GPA range so that only the
+    /// provided VTL can access it.
     fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()> {
         self.hvcall
             .apply_vtl_protections(MemoryRange::new(range), vtl)?;
         Ok(())
     }
 
+    /// Enable the specified VTL on a VP and seed it with a default
+    /// context captured from the current execution environment.
    fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> {
         let vp_ctx = self.get_default_context()?;
         self.hvcall.enable_vp_vtl(vp_index, vtl, Some(vp_ctx))?;
         Ok(())
     }
 
+    /// Return the VTL in which the current code is running.
     fn get_current_vtl(&self) -> TmkResult {
         Ok(self.my_vtl)
     }
 
+    /// Inject a default context into an already existing VP/VTL pair.
     fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> {
         let i: u8 = match vtl {
             Vtl::Vtl0 => 0,
@@ -300,6 +323,7 @@ impl VtlPlatformTrait for HvTestCtx {
         Ok(())
     }
 
+    /// Enable VTL support for the entire partition.
     fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()> {
         self.hvcall
             .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl)?;
@@ -307,21 +331,28 @@ impl VtlPlatformTrait for HvTestCtx {
         Ok(())
     }
 
+    /// Turn on VTL protections for the currently running VTL.
     fn setup_vtl_protection(&mut self) -> TmkResult<()> {
         self.hvcall.enable_vtl_protection(HvInputVtl::CURRENT_VTL)?;
         log::info!("enabled vtl protections for the partition.");
         Ok(())
     }
 
+    /// Switch execution from the current (low) VTL to the next higher
+    /// one (`vtl_call`).
     fn switch_to_high_vtl(&mut self) {
         HvCall::vtl_call();
     }
 
+    /// Return from a high VTL back to the low VTL (`vtl_return`).
     fn switch_to_low_vtl(&mut self) {
         HvCall::vtl_return();
     }
 }
+
 impl HvTestCtx {
+    /// Construct an uninitialised test context.
+    /// Call [`HvTestCtx::init`] before using the value.
     pub const fn new() -> Self {
         HvTestCtx {
             hvcall: HvCall::new(),
@@ -331,6 +362,10 @@ impl HvTestCtx {
         }
     }
 
+    /// Perform the one-time initialisation sequence:
+    /// - initialise the hypercall page,
+    /// - discover the VP count and create command queues,
+    /// - record the current VTL.
     pub fn init(&mut self) -> TmkResult<()> {
         self.hvcall.initialize();
         let vp_count = self.get_vp_count()?;
@@ -341,6 +376,9 @@ impl HvTestCtx {
         Ok(())
     }
 
+    /// Busy-loop executor that runs on every VP.
+    /// Extracts commands from the per-VP queue and executes them in the
+    /// appropriate VTL, switching VTLs when necessary.
     fn exec_handler() {
         let mut ctx = HvTestCtx::new();
         ctx.init().expect("error: failed to init on a VP");
@@ -359,6 +397,7 @@ impl HvTestCtx {
             let mut cmdt = cmdt().lock();
             let d = cmdt.get_mut(&ctx.my_vp_idx);
             if d.is_some() {
+                log::info!("vp: {} has commands to execute", ctx.my_vp_idx);
                 let d = d.unwrap();
                 if !d.is_empty() {
                     let (_c, v) = d.front().unwrap();
@@ -387,11 +426,15 @@ impl HvTestCtx {
     }
 
     #[cfg(target_arch = "x86_64")]
+    /// Capture the current VP context, patch the entry point and stack
+    /// so that the new VP starts in `exec_handler`.
     fn get_default_context(&mut self) -> Result {
         return self.run_fn_with_current_context(HvTestCtx::exec_handler);
     }
 
     #[cfg(target_arch = "x86_64")]
+    /// Helper to wrap an arbitrary function inside a captured VP context
+    /// that can later be used to start a new VP/VTL instance.
     fn run_fn_with_current_context(&mut self, func: fn()) -> Result {
         use super::alloc::SIZE_1MB;
 
diff --git a/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs b/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs
index bda77c6322..45aa4b2248 100644
--- a/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs
+++ b/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs
@@ -1,8 +1,7 @@
 use hvdef::Vtl;
 use sync_nostd::Channel;
 
-use crate::{context::{VirtualProcessorPlatformTrait, VpExecutor, VtlPlatformTrait}, tmk_assert, tmkdefs::TmkErrorType};
-
+use crate::{context::{VirtualProcessorPlatformTrait, VpExecutor, VtlPlatformTrait}, tmk_assert};
 
 pub fn exec<T>(ctx: &mut T)
 where
diff --git a/opentmk/opentmk/src/uefi/tests/hv_misc.rs b/opentmk/opentmk/src/uefi/tests/hv_misc.rs
index 0621155ebd..a912a94595 100644
--- a/opentmk/opentmk/src/uefi/tests/hv_misc.rs
+++ b/opentmk/opentmk/src/uefi/tests/hv_misc.rs
@@ -8,7 +8,6 @@ use crate::context::{
 
 // This test is to verify that the VTL protections are working as expected.
 // The stack values in VTL0 are changing after interrupt handling in VTL1.
 use crate::tmk_assert;
-use crate::tmk_assert::{AssertOption, AssertResult};
 use crate::tmkdefs::TmkResult;
 use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB};
 use crate::{context, uefi::hypvctx};
@@ -54,20 +53,9 @@ where
     ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| {
         log::info!("successfully started running VTL1 on vp0.");
         ctx.setup_secure_intercept(0x30);
-        ctx.set_interrupt_idx(0x30, || {
+        ctx.set_interrupt_idx(0x30, move || {
             log::info!("interrupt fired!");
 
-            let mut hv_test_ctx = HvTestCtx::new();
-            hv_test_ctx.init();
-
-            let c = hv_test_ctx.get_register(HvAllArchRegisterName::VsmVpStatus.0);
-            tmk_assert!(c.is_ok(), "read should succeed");
-
-            let c = c.unwrap();
-            let cp = HvRegisterVsmVpStatus::from_bits(c as u64);
-
-            log::info!("VSM VP Status: {:?}", cp);
-
             log::info!("interrupt handled!");
         });
 
@@ -120,6 +108,8 @@ where
         );
     }
 
+
+    log::info!("after ctx ptr: {:p}", &ctx as *const _);
     unsafe { asm!("mov {}, rsp", out(reg) l) };
     log::info!("rsp: 0x{:x}", l);
 
diff --git a/opentmk/opentmk/src/uefi/tests/mod.rs b/opentmk/opentmk/src/uefi/tests/mod.rs
index 6ba7df3c84..75fc35f851 100644
--- a/opentmk/opentmk/src/uefi/tests/mod.rs
+++ b/opentmk/opentmk/src/uefi/tests/mod.rs
@@ -1,12 +1,13 @@
+#![allow(dead_code)]
 use super::hypvctx::HvTestCtx;
 
-pub mod hv_processor;
-pub mod hv_misc;
+mod hv_processor;
+mod hv_misc;
 mod hv_error_vp_start;
 
 pub fn run_test() {
     let mut ctx = HvTestCtx::new();
     ctx.init().expect("failed to init on BSP");
-    hv_error_vp_start::exec(&mut ctx);
+    hv_processor::exec(&mut ctx);
 }
\ No newline at end of file
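
A note on the serialization pattern used in the tmk_assert/tmk_logger hunks above: the untyped `serde_json::json!` literals are replaced with `#[derive(Serialize)]` structs (`AssertJson`, `LogEntry`), so the emitted JSON schema is fixed at compile time and key typos like the old `"type:"` cannot recur. A minimal, self-contained sketch of the same pattern follows; it uses hypothetical names (`Record`, `kind`) rather than the crate's own types, and is written as a host-side (std) program purely for illustration.

use serde::Serialize;

// Hypothetical record mirroring the shape of `LogEntry` above;
// the struct and field names here are illustrative only.
#[derive(Serialize)]
struct Record<'a> {
    #[serde(rename = "type")]
    kind: &'a str,
    level: &'a str,
    message: &'a str,
    line: String,
}

fn main() {
    let rec = Record {
        kind: "log",
        level: "INFO",
        message: "interrupt fired!",
        line: format!("{}:{}", file!(), line!()),
    };
    // serde_json::to_string fails only if a Serialize impl reports an
    // error, so expect() here mirrors the handling in tmk_assert.rs.
    let json = serde_json::to_string(&rec).expect("serialization failed");
    println!("{json}");
}

Keeping the field list and the `#[serde(rename = "type")]` attribute in a single struct definition is what lets the compiler check the record shape, which is the design choice the patch follows.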