diff --git a/Cargo.lock b/Cargo.lock index f86c244203..af26549104 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3547,6 +3547,9 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] [[package]] name = "libc" @@ -3609,6 +3612,15 @@ dependencies = [ "escape8259", ] +[[package]] +name = "linked_list_allocator" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" +dependencies = [ + "spinning_top", +] + [[package]] name = "linkme" version = "0.3.31" @@ -4769,6 +4781,29 @@ dependencies = [ "thiserror 2.0.0", ] +[[package]] +name = "opentmk" +version = "0.0.0" +dependencies = [ + "arrayvec", + "bitfield-struct", + "cfg-if", + "hvdef", + "lazy_static", + "linked_list_allocator", + "log", + "memory_range", + "minimal_rt", + "minimal_rt_build", + "serde", + "serde_json", + "sync_nostd", + "uefi", + "x86_64", + "x86defs", + "zerocopy 0.8.14", +] + [[package]] name = "openvmm" version = "0.0.0" @@ -6360,6 +6395,30 @@ dependencies = [ "zerocopy 0.8.14", ] +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spinning_top" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0" +dependencies = [ + "lock_api", +] + [[package]] name = "stackfuture" version = "0.3.0" @@ -6481,6 +6540,13 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_nostd" +version = "0.1.0" +dependencies = [ + "spin 0.10.0", +] + [[package]] name = "tap" version = "1.0.1" @@ -8736,6 +8802,12 @@ dependencies = [ "vmsocket", ] +[[package]] +name = "volatile" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "442887c63f2c839b346c192d047a7c87e73d0689c9157b00b53dcc27dd5ea793" + [[package]] name = "vpci" version = "0.0.0" @@ -9313,6 +9385,18 @@ dependencies = [ "tap", ] +[[package]] +name = "x86_64" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f042214de98141e9c8706e8192b73f56494087cc55ebec28ce10f26c5c364ae" +dependencies = [ + "bit_field", + "bitflags 2.6.0", + "rustversion", + "volatile", +] + [[package]] name = "x86defs" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 1dd0ca13fb..ed1823f711 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,9 @@ members = [ "vm/loader/igvmfilegen", "vm/vmgs/vmgs_lib", "vm/vmgs/vmgstool", + # opentmk + "opentmk/opentmk", + "opentmk/sync" ] exclude = [ "xsync", @@ -374,6 +377,9 @@ vnc_worker_defs = { path = "workers/vnc_worker_defs" } vnc = { path = "workers/vnc_worker/vnc" } profiler_worker = { path = "openhcl/profiler_worker" } +# opentmk +sync_nostd = { path = "opentmk/sync"} + # crates.io anyhow = "1.0" arbitrary = "1.3" @@ -445,9 +451,11 @@ jiff = "0.1" kvm-bindings = "0.7" # Use of these specific REPO will go away when changes are taken upstream. 
landlock = "0.3.1" +lazy_static = { version = "1.4.0", features = ["spin_no_std"] } libc = "0.2" libfuzzer-sys = "0.4" libtest-mimic = "0.8" +linked_list_allocator = "0.10.5" linkme = "0.3.9" log = "0.4" macaddr = "1.0" @@ -493,6 +501,7 @@ smallbox = "0.8" smallvec = "1.8" smoltcp = { version = "0.8", default-features = false } socket2 = "0.5" +spin = "0.10.0" stackfuture = "0.3" static_assertions = "1.1" syn = "2" @@ -520,6 +529,7 @@ winapi = "0.3" windows = "0.59" windows-service = "0.7" windows-sys = "0.52" +x86_64 = "0.15.2" xshell = "=0.2.2" # pin to 0.2.2 to work around https://github.com/matklad/xshell/issues/63 xshell-macros = "0.2" # We add the derive feature here since the vast majority of our crates use it. diff --git a/opentmk/opentmk/Cargo.toml b/opentmk/opentmk/Cargo.toml new file mode 100644 index 0000000000..64926f8e7c --- /dev/null +++ b/opentmk/opentmk/Cargo.toml @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +[package] +name = "opentmk" +edition.workspace = true +rust-version.workspace = true + +[dependencies] +arrayvec.workspace = true +bitfield-struct.workspace = true +cfg-if.workspace = true +hvdef = {workspace = true} +lazy_static.workspace = true +linked_list_allocator.workspace = true +log.workspace = true +memory_range.workspace = true +minimal_rt.workspace = true +serde = { version = "1.0", default-features = false, features = ["derive"]} +serde_json = { version = "1.0", default-features = false, features = ["alloc"] } +uefi = { workspace = true, features = ["alloc"] } +x86_64.workspace = true +x86defs.workspace = true +zerocopy.workspace = true +sync_nostd.workspace = true + +[lints] +workspace = true + +[build-dependencies] +minimal_rt_build.workspace = true + +[profile.release] +debug = false +strip = "debuginfo" + diff --git a/opentmk/opentmk/README.md b/opentmk/opentmk/README.md new file mode 100644 index 0000000000..a2658e8753 --- /dev/null +++ b/opentmk/opentmk/README.md @@ -0,0 +1,3 @@ +# OpenTMK + +See the guide for more info on how to build/run the code in this crate. diff --git a/opentmk/opentmk/build_deploy.sh b/opentmk/opentmk/build_deploy.sh new file mode 100755 index 0000000000..0c68e1643e --- /dev/null +++ b/opentmk/opentmk/build_deploy.sh @@ -0,0 +1,3 @@ +RUST_BACKTRACE=1 cargo build -p opentmk --target x86_64-unknown-uefi +cargo xtask guest-test uefi --bootx64 ./target/x86_64-unknown-uefi/debug/opentmk.efi +qemu-img convert -f raw -O vhdx ./target/x86_64-unknown-uefi/debug/opentmk.img ~/projects/opentmk.vhdx \ No newline at end of file diff --git a/opentmk/opentmk/src/arch/aarch64/hypercall.rs b/opentmk/opentmk/src/arch/aarch64/hypercall.rs new file mode 100644 index 0000000000..35011e089a --- /dev/null +++ b/opentmk/opentmk/src/arch/aarch64/hypercall.rs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +/// Writes a synthehtic register to tell the hypervisor the OS ID for the boot shim. +fn report_os_id(guest_os_id: u64) { + // On ARM64, to be able to make hypercalls, one needs first to set the Guest OS ID + // synthetic register using a hypercall. Can't use `Hvcall::set_register` at that will + // lead to the infinite recursion as that function will first try initializing hypercalls + // with setting a register. + // + // Only one very specific HvSetVpRegisters hypercall is allowed to set the Guest OS ID + // (this is TLFS section 17.4.4.1.1 and 5.3), and that must be the fast hypercall. 
+    let _ = minimal_rt::arch::hypercall::set_register_fast(
+        hvdef::HvArm64RegisterName::GuestOsId.into(),
+        guest_os_id.into(),
+    );
+}
+
+pub(crate) fn initialize(guest_os_id: u64) {
+    // We are assuming we are running under a Microsoft hypervisor.
+    report_os_id(guest_os_id);
+}
+
+/// Call before jumping to the kernel.
+pub(crate) fn uninitialize() {
+    report_os_id(0);
+}
\ No newline at end of file
diff --git a/opentmk/opentmk/src/arch/aarch64/mod.rs b/opentmk/opentmk/src/arch/aarch64/mod.rs
new file mode 100644
index 0000000000..c9ab11a58c
--- /dev/null
+++ b/opentmk/opentmk/src/arch/aarch64/mod.rs
@@ -0,0 +1,3 @@
+pub use minimal_rt::arch::aarch64::serial;
+
+pub mod hypercall;
diff --git a/opentmk/opentmk/src/arch/mod.rs b/opentmk/opentmk/src/arch/mod.rs
new file mode 100644
index 0000000000..4bcf7781ee
--- /dev/null
+++ b/opentmk/opentmk/src/arch/mod.rs
@@ -0,0 +1,16 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+//! Imports and re-exports architecture-specific implementations.
+
+// Both module declarations live inside the cfg_if so that each architecture
+// only compiles (and only needs) its own implementation.
+cfg_if::cfg_if!(
+    if #[cfg(target_arch = "x86_64")] {
+        mod x86_64;
+        pub use x86_64::*;
+    } else if #[cfg(target_arch = "aarch64")] {
+        mod aarch64;
+        pub use aarch64::*;
+    } else {
+        compile_error!("target_arch is not supported");
+    }
+);
\ No newline at end of file
diff --git a/opentmk/opentmk/src/arch/x86_64/hypercall.rs b/opentmk/opentmk/src/arch/x86_64/hypercall.rs
new file mode 100644
index 0000000000..1337cbe38e
--- /dev/null
+++ b/opentmk/opentmk/src/arch/x86_64/hypercall.rs
@@ -0,0 +1,53 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use core::ptr::addr_of;
+use hvdef::HV_PAGE_SIZE;
+use minimal_rt::arch::hypercall::HYPERCALL_PAGE;
+use minimal_rt::arch::msr::read_msr;
+use minimal_rt::arch::msr::write_msr;
+
+#[expect(unsafe_code)]
+/// Writes an MSR to tell the hypervisor the OS ID for the boot shim.
+fn report_os_id(guest_os_id: u64) {
+    // SAFETY: Using the contract established in the Hyper-V TLFS.
+    unsafe {
+        write_msr(hvdef::HV_X64_MSR_GUEST_OS_ID, guest_os_id);
+    };
+}
+
+#[expect(unsafe_code)]
+/// Writes an MSR to tell the hypervisor where the hypercall page is.
+pub fn write_hypercall_msr(enable: bool) {
+    // SAFETY: Using the contract established in the Hyper-V TLFS.
+    let hypercall_contents = hvdef::hypercall::MsrHypercallContents::from(unsafe {
+        read_msr(hvdef::HV_X64_MSR_HYPERCALL)
+    });
+
+    let hypercall_page_num = addr_of!(HYPERCALL_PAGE) as u64 / HV_PAGE_SIZE;
+
+    // Nothing to do if the page is already enabled and enabling was requested.
+    if enable && hypercall_contents.enable() {
+        return;
+    }
+    let new_hv_contents: hvdef::hypercall::MsrHypercallContents =
+        hypercall_contents.with_enable(enable).with_gpn(if enable {
+            hypercall_page_num
+        } else {
+            0
+        });
+
+    // SAFETY: Using the contract established in the Hyper-V TLFS.
+    unsafe { write_msr(hvdef::HV_X64_MSR_HYPERCALL, new_hv_contents.into()) };
+}
+
+/// Has to be called before using hypercalls.
+pub(crate) fn initialize(guest_os_id: u64) {
+    // We are assuming we are running under a Microsoft hypervisor, so there is
+    // no need to check any cpuid leaves.
+    report_os_id(guest_os_id);
+    write_hypercall_msr(true);
+}
+
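The x86-64 flow above is driven from `HvCall::initialize` later in this patch. A minimal sketch of the intended call sequence (hedged: the OS ID value mirrors the one `HvCall::initialize` uses; everything else is illustrative):

```rust
// Sketch: bring up hypercalls once, issue calls, then tear down before
// any handoff. Assumes the surrounding opentmk crate context.
fn hypercall_session() {
    let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1);
    // Programs HV_X64_MSR_GUEST_OS_ID, then enables the hypercall page MSR.
    crate::arch::hypercall::initialize(guest_os_id.into());
    // ... issue hypercalls through HvCall ...
    // Clears both MSRs again (see `uninitialize` just below).
    crate::arch::hypercall::uninitialize();
}
```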
+/// Call before jumping to the kernel.
+pub(crate) fn uninitialize() {
+    write_hypercall_msr(false);
+    report_os_id(0);
+}
\ No newline at end of file
diff --git a/opentmk/opentmk/src/arch/x86_64/interrupt.rs b/opentmk/opentmk/src/arch/x86_64/interrupt.rs
new file mode 100644
index 0000000000..1f31036de8
--- /dev/null
+++ b/opentmk/opentmk/src/arch/x86_64/interrupt.rs
@@ -0,0 +1,43 @@
+use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};
+use lazy_static::lazy_static;
+use sync_nostd::Mutex;
+
+use super::interrupt_handler_register::{register_interrupt_handler, set_common_handler};
+
+lazy_static! {
+    static ref IDT: InterruptDescriptorTable = {
+        let mut idt = InterruptDescriptorTable::new();
+        register_interrupt_handler(&mut idt);
+        idt.double_fault.set_handler_fn(handler_double_fault);
+        idt
+    };
+}
+
+// Per-vector callbacks dispatched to by `common_handler`. Writes are
+// serialized by `MUTEX`; reads happen from interrupt context.
+static mut HANDLERS: [fn(); 256] = [no_op; 256];
+static MUTEX: Mutex<()> = Mutex::new(());
+
+fn no_op() {}
+
+fn common_handler(_stack_frame: InterruptStackFrame, interrupt: u8) {
+    unsafe { HANDLERS[interrupt as usize](); }
+}
+
+pub fn set_handler(interrupt: u8, handler: fn()) {
+    let _lock = MUTEX.lock();
+    unsafe { HANDLERS[interrupt as usize] = handler; }
+}
+
+extern "x86-interrupt" fn handler_double_fault(
+    stack_frame: InterruptStackFrame,
+    error_code: u64,
+) -> ! {
+    log::error!(
+        "EXCEPTION:\n\tERROR_CODE: {}\n\tDOUBLE FAULT\n{:#?}",
+        error_code,
+        stack_frame
+    );
+    loop {}
+}
+
+// Load the IDT, install the common dispatcher, and enable interrupts.
+pub fn init() {
+    IDT.load();
+    set_common_handler(common_handler);
+    x86_64::instructions::interrupts::enable();
+}
\ No newline at end of file
diff --git a/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs
new file mode 100644
index 0000000000..a2b799b18b
--- /dev/null
+++ b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs
@@ -0,0 +1,586 @@
+#![allow(dead_code)]
+use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
+use sync_nostd::Mutex;
+
+static mut COMMON_HANDLER: fn(InterruptStackFrame, u8) = common_handler;
+static COMMON_HANDLER_MUTEX: Mutex<()> = Mutex::new(());
+
+#[unsafe(no_mangle)]
+fn abstraction_handle(stack_frame: InterruptStackFrame, interrupt: u8) {
+    unsafe { (COMMON_HANDLER)(stack_frame, interrupt) };
+    log::debug!("Interrupt: {}", interrupt);
+}
+
+macro_rules! create_fn {
+    ($name:ident, $i:expr) => {
+        extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) {
+            abstraction_handle(stack_frame, $i);
+        }
+    };
+}
+
+macro_rules! create_fn_create_with_errorcode {
+    ($name:ident, $i:expr) => {
+        extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) {
+            abstraction_handle(stack_frame, $i);
+        }
+    };
+}
+
+macro_rules! create_fn_divergent_create_with_errorcode {
+    ($name:ident, $i:expr) => {
+        extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) -> ! {
+            abstraction_handle(stack_frame, $i);
+            loop {}
+        }
+    };
+}
+
+macro_rules! create_fn_divergent_create {
+    ($name:ident, $i:expr) => {
+        extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) -> ! {
+            abstraction_handle(stack_frame, $i);
+            loop {}
+        }
+    };
+}
+
+macro_rules! create_page_fault_fn {
+    ($name:ident, $i:expr) => {
+        extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: PageFaultErrorCode) {
+            abstraction_handle(stack_frame, $i);
+        }
+    };
+}
+
+macro_rules!
register_interrupt_handler { + ($idt: expr, $i: expr, $name: ident) => { + $idt[$i].set_handler_fn($name); + }; +} + +fn common_handler(_stack_frame: InterruptStackFrame, interrupt: u8) { + log::info!("Default interrupt handler fired: {}", interrupt); +} + +pub fn set_common_handler(handler: fn(InterruptStackFrame, u8)) { + let _guard = COMMON_HANDLER_MUTEX.lock(); + unsafe { + COMMON_HANDLER = handler; + } +} + +extern "x86-interrupt" fn no_op(_stack_frame: InterruptStackFrame) {} + +pub fn register_interrupt_handler(idt: &mut InterruptDescriptorTable) { + idt.divide_error.set_handler_fn(handler_0); + idt.debug.set_handler_fn(handler_1); + idt.non_maskable_interrupt.set_handler_fn(handler_2); + idt.breakpoint.set_handler_fn(handler_3); + idt.overflow.set_handler_fn(handler_4); + idt.bound_range_exceeded.set_handler_fn(handler_5); + idt.invalid_opcode.set_handler_fn(handler_6); + idt.device_not_available.set_handler_fn(handler_7); + idt.double_fault.set_handler_fn(handler_8); + register_interrupt_handler!(idt, 9, handler_9); + idt.invalid_tss.set_handler_fn(handler_10); + idt.segment_not_present.set_handler_fn(handler_11); + idt.stack_segment_fault.set_handler_fn(handler_12); + idt.general_protection_fault.set_handler_fn(handler_13); + idt.page_fault.set_handler_fn(handler_14); + // Vector 15 is reserved + idt.x87_floating_point.set_handler_fn(handler_16); + idt.alignment_check.set_handler_fn(handler_17); + idt.machine_check.set_handler_fn(handler_18); + idt.simd_floating_point.set_handler_fn(handler_19); + idt.virtualization.set_handler_fn(handler_20); + idt.cp_protection_exception.set_handler_fn(handler_21); + // Vector 22-27 is reserved + idt.hv_injection_exception.set_handler_fn(handler_28); + idt.vmm_communication_exception.set_handler_fn(handler_29); + idt.security_exception.set_handler_fn(handler_30); + // Vector 31 is reserved + + register_interrupt_handler!(idt, 32, handler_32); + register_interrupt_handler!(idt, 33, handler_33); + register_interrupt_handler!(idt, 34, handler_34); + register_interrupt_handler!(idt, 35, handler_35); + register_interrupt_handler!(idt, 36, handler_36); + register_interrupt_handler!(idt, 37, handler_37); + register_interrupt_handler!(idt, 38, handler_38); + register_interrupt_handler!(idt, 39, handler_39); + register_interrupt_handler!(idt, 40, handler_40); + register_interrupt_handler!(idt, 41, handler_41); + register_interrupt_handler!(idt, 42, handler_42); + register_interrupt_handler!(idt, 43, handler_43); + register_interrupt_handler!(idt, 44, handler_44); + register_interrupt_handler!(idt, 45, handler_45); + register_interrupt_handler!(idt, 46, handler_46); + register_interrupt_handler!(idt, 47, handler_47); + register_interrupt_handler!(idt, 48, handler_48); + register_interrupt_handler!(idt, 49, handler_49); + register_interrupt_handler!(idt, 50, handler_50); + register_interrupt_handler!(idt, 51, handler_51); + register_interrupt_handler!(idt, 52, handler_52); + register_interrupt_handler!(idt, 53, handler_53); + register_interrupt_handler!(idt, 54, handler_54); + register_interrupt_handler!(idt, 55, handler_55); + register_interrupt_handler!(idt, 56, handler_56); + register_interrupt_handler!(idt, 57, handler_57); + register_interrupt_handler!(idt, 58, handler_58); + register_interrupt_handler!(idt, 59, handler_59); + register_interrupt_handler!(idt, 60, handler_60); + register_interrupt_handler!(idt, 61, handler_61); + register_interrupt_handler!(idt, 62, handler_62); + register_interrupt_handler!(idt, 63, handler_63); + 
register_interrupt_handler!(idt, 64, handler_64); + register_interrupt_handler!(idt, 65, handler_65); + register_interrupt_handler!(idt, 66, handler_66); + register_interrupt_handler!(idt, 67, handler_67); + register_interrupt_handler!(idt, 68, handler_68); + register_interrupt_handler!(idt, 69, handler_69); + register_interrupt_handler!(idt, 70, handler_70); + register_interrupt_handler!(idt, 71, handler_71); + register_interrupt_handler!(idt, 72, handler_72); + register_interrupt_handler!(idt, 73, handler_73); + register_interrupt_handler!(idt, 74, handler_74); + register_interrupt_handler!(idt, 75, handler_75); + register_interrupt_handler!(idt, 76, handler_76); + register_interrupt_handler!(idt, 77, handler_77); + register_interrupt_handler!(idt, 78, handler_78); + register_interrupt_handler!(idt, 79, handler_79); + register_interrupt_handler!(idt, 80, handler_80); + register_interrupt_handler!(idt, 81, handler_81); + register_interrupt_handler!(idt, 82, handler_82); + register_interrupt_handler!(idt, 83, handler_83); + register_interrupt_handler!(idt, 84, handler_84); + register_interrupt_handler!(idt, 85, handler_85); + register_interrupt_handler!(idt, 86, handler_86); + register_interrupt_handler!(idt, 87, handler_87); + register_interrupt_handler!(idt, 88, handler_88); + register_interrupt_handler!(idt, 89, handler_89); + register_interrupt_handler!(idt, 90, handler_90); + register_interrupt_handler!(idt, 91, handler_91); + register_interrupt_handler!(idt, 92, handler_92); + register_interrupt_handler!(idt, 93, handler_93); + register_interrupt_handler!(idt, 94, handler_94); + register_interrupt_handler!(idt, 95, handler_95); + register_interrupt_handler!(idt, 96, handler_96); + register_interrupt_handler!(idt, 97, handler_97); + register_interrupt_handler!(idt, 98, handler_98); + register_interrupt_handler!(idt, 99, handler_99); + register_interrupt_handler!(idt, 100, handler_100); + register_interrupt_handler!(idt, 101, handler_101); + register_interrupt_handler!(idt, 102, handler_102); + register_interrupt_handler!(idt, 103, handler_103); + register_interrupt_handler!(idt, 104, handler_104); + register_interrupt_handler!(idt, 105, handler_105); + register_interrupt_handler!(idt, 106, handler_106); + register_interrupt_handler!(idt, 107, handler_107); + register_interrupt_handler!(idt, 108, handler_108); + register_interrupt_handler!(idt, 109, handler_109); + register_interrupt_handler!(idt, 110, handler_110); + register_interrupt_handler!(idt, 111, handler_111); + register_interrupt_handler!(idt, 112, handler_112); + register_interrupt_handler!(idt, 113, handler_113); + register_interrupt_handler!(idt, 114, handler_114); + register_interrupt_handler!(idt, 115, handler_115); + register_interrupt_handler!(idt, 116, handler_116); + register_interrupt_handler!(idt, 117, handler_117); + register_interrupt_handler!(idt, 118, handler_118); + register_interrupt_handler!(idt, 119, handler_119); + register_interrupt_handler!(idt, 120, handler_120); + register_interrupt_handler!(idt, 121, handler_121); + register_interrupt_handler!(idt, 122, handler_122); + register_interrupt_handler!(idt, 123, handler_123); + register_interrupt_handler!(idt, 124, handler_124); + register_interrupt_handler!(idt, 125, handler_125); + register_interrupt_handler!(idt, 126, handler_126); + register_interrupt_handler!(idt, 127, handler_127); + register_interrupt_handler!(idt, 128, handler_128); + register_interrupt_handler!(idt, 129, handler_129); + register_interrupt_handler!(idt, 130, handler_130); + 
register_interrupt_handler!(idt, 131, handler_131); + register_interrupt_handler!(idt, 132, handler_132); + register_interrupt_handler!(idt, 133, handler_133); + register_interrupt_handler!(idt, 134, handler_134); + register_interrupt_handler!(idt, 135, handler_135); + register_interrupt_handler!(idt, 136, handler_136); + register_interrupt_handler!(idt, 137, handler_137); + register_interrupt_handler!(idt, 138, handler_138); + register_interrupt_handler!(idt, 139, handler_139); + register_interrupt_handler!(idt, 140, handler_140); + register_interrupt_handler!(idt, 141, handler_141); + register_interrupt_handler!(idt, 142, handler_142); + register_interrupt_handler!(idt, 143, handler_143); + register_interrupt_handler!(idt, 144, handler_144); + register_interrupt_handler!(idt, 145, handler_145); + register_interrupt_handler!(idt, 146, handler_146); + register_interrupt_handler!(idt, 147, handler_147); + register_interrupt_handler!(idt, 148, handler_148); + register_interrupt_handler!(idt, 149, handler_149); + register_interrupt_handler!(idt, 150, handler_150); + register_interrupt_handler!(idt, 151, handler_151); + register_interrupt_handler!(idt, 152, handler_152); + register_interrupt_handler!(idt, 153, handler_153); + register_interrupt_handler!(idt, 154, handler_154); + register_interrupt_handler!(idt, 155, handler_155); + register_interrupt_handler!(idt, 156, handler_156); + register_interrupt_handler!(idt, 157, handler_157); + register_interrupt_handler!(idt, 158, handler_158); + register_interrupt_handler!(idt, 159, handler_159); + register_interrupt_handler!(idt, 160, handler_160); + register_interrupt_handler!(idt, 161, handler_161); + register_interrupt_handler!(idt, 162, handler_162); + register_interrupt_handler!(idt, 163, handler_163); + register_interrupt_handler!(idt, 164, handler_164); + register_interrupt_handler!(idt, 165, handler_165); + register_interrupt_handler!(idt, 166, handler_166); + register_interrupt_handler!(idt, 167, handler_167); + register_interrupt_handler!(idt, 168, handler_168); + register_interrupt_handler!(idt, 169, handler_169); + register_interrupt_handler!(idt, 170, handler_170); + register_interrupt_handler!(idt, 171, handler_171); + register_interrupt_handler!(idt, 172, handler_172); + register_interrupt_handler!(idt, 173, handler_173); + register_interrupt_handler!(idt, 174, handler_174); + register_interrupt_handler!(idt, 175, handler_175); + register_interrupt_handler!(idt, 176, handler_176); + register_interrupt_handler!(idt, 177, handler_177); + register_interrupt_handler!(idt, 178, handler_178); + register_interrupt_handler!(idt, 179, handler_179); + register_interrupt_handler!(idt, 180, handler_180); + register_interrupt_handler!(idt, 181, handler_181); + register_interrupt_handler!(idt, 182, handler_182); + register_interrupt_handler!(idt, 183, handler_183); + register_interrupt_handler!(idt, 184, handler_184); + register_interrupt_handler!(idt, 185, handler_185); + register_interrupt_handler!(idt, 186, handler_186); + register_interrupt_handler!(idt, 187, handler_187); + register_interrupt_handler!(idt, 188, handler_188); + register_interrupt_handler!(idt, 189, handler_189); + register_interrupt_handler!(idt, 190, handler_190); + register_interrupt_handler!(idt, 191, handler_191); + register_interrupt_handler!(idt, 192, handler_192); + register_interrupt_handler!(idt, 193, handler_193); + register_interrupt_handler!(idt, 194, handler_194); + register_interrupt_handler!(idt, 195, handler_195); + register_interrupt_handler!(idt, 196, 
handler_196); + register_interrupt_handler!(idt, 197, handler_197); + register_interrupt_handler!(idt, 198, handler_198); + register_interrupt_handler!(idt, 199, handler_199); + register_interrupt_handler!(idt, 200, handler_200); + register_interrupt_handler!(idt, 201, handler_201); + register_interrupt_handler!(idt, 202, handler_202); + register_interrupt_handler!(idt, 203, handler_203); + register_interrupt_handler!(idt, 204, handler_204); + register_interrupt_handler!(idt, 205, handler_205); + register_interrupt_handler!(idt, 206, handler_206); + register_interrupt_handler!(idt, 207, handler_207); + register_interrupt_handler!(idt, 208, handler_208); + register_interrupt_handler!(idt, 209, handler_209); + register_interrupt_handler!(idt, 210, handler_210); + register_interrupt_handler!(idt, 211, handler_211); + register_interrupt_handler!(idt, 212, handler_212); + register_interrupt_handler!(idt, 213, handler_213); + register_interrupt_handler!(idt, 214, handler_214); + register_interrupt_handler!(idt, 215, handler_215); + register_interrupt_handler!(idt, 216, handler_216); + register_interrupt_handler!(idt, 217, handler_217); + register_interrupt_handler!(idt, 218, handler_218); + register_interrupt_handler!(idt, 219, handler_219); + register_interrupt_handler!(idt, 220, handler_220); + register_interrupt_handler!(idt, 221, handler_221); + register_interrupt_handler!(idt, 222, handler_222); + register_interrupt_handler!(idt, 223, handler_223); + register_interrupt_handler!(idt, 224, handler_224); + register_interrupt_handler!(idt, 225, handler_225); + register_interrupt_handler!(idt, 226, handler_226); + register_interrupt_handler!(idt, 227, handler_227); + register_interrupt_handler!(idt, 228, handler_228); + register_interrupt_handler!(idt, 229, handler_229); + register_interrupt_handler!(idt, 230, handler_230); + register_interrupt_handler!(idt, 231, handler_231); + register_interrupt_handler!(idt, 232, handler_232); + register_interrupt_handler!(idt, 233, handler_233); + register_interrupt_handler!(idt, 234, handler_234); + register_interrupt_handler!(idt, 235, handler_235); + register_interrupt_handler!(idt, 236, handler_236); + register_interrupt_handler!(idt, 237, handler_237); + register_interrupt_handler!(idt, 238, handler_238); + register_interrupt_handler!(idt, 239, handler_239); + register_interrupt_handler!(idt, 240, handler_240); + register_interrupt_handler!(idt, 241, handler_241); + register_interrupt_handler!(idt, 242, handler_242); + register_interrupt_handler!(idt, 243, handler_243); + register_interrupt_handler!(idt, 244, handler_244); + register_interrupt_handler!(idt, 245, handler_245); + register_interrupt_handler!(idt, 246, handler_246); + register_interrupt_handler!(idt, 247, handler_247); + register_interrupt_handler!(idt, 248, handler_248); + register_interrupt_handler!(idt, 249, handler_249); + register_interrupt_handler!(idt, 250, handler_250); + register_interrupt_handler!(idt, 251, handler_251); + register_interrupt_handler!(idt, 252, handler_252); + register_interrupt_handler!(idt, 253, handler_253); + register_interrupt_handler!(idt, 254, handler_254); + register_interrupt_handler!(idt, 255, handler_255); +} + +create_fn!(handler_0, 0); +create_fn!(handler_1, 1); +create_fn!(handler_2, 2); +create_fn!(handler_3, 3); +create_fn!(handler_4, 4); +create_fn!(handler_5, 5); +create_fn!(handler_6, 6); +create_fn!(handler_7, 7); +create_fn_divergent_create_with_errorcode!(handler_8, 8); +create_fn!(handler_9, 9); +create_fn_create_with_errorcode!(handler_10, 
10); +create_fn_create_with_errorcode!(handler_11, 11); +create_fn_create_with_errorcode!(handler_12, 12); +create_fn_create_with_errorcode!(handler_13, 13); +create_page_fault_fn!(handler_14, 14); +create_fn!(handler_15, 15); +create_fn!(handler_16, 16); +create_fn_create_with_errorcode!(handler_17, 17); +create_fn_divergent_create!(handler_18, 18); +create_fn!(handler_19, 19); +create_fn!(handler_20, 20); +create_fn_create_with_errorcode!(handler_21, 21); +create_fn!(handler_22, 22); +create_fn!(handler_23, 23); +create_fn!(handler_24, 24); +create_fn!(handler_25, 25); +create_fn!(handler_26, 26); +create_fn!(handler_27, 27); +create_fn!(handler_28, 28); +create_fn_create_with_errorcode!(handler_29, 29); +create_fn_create_with_errorcode!(handler_30, 30); +create_fn!(handler_31, 31); +create_fn!(handler_32, 32); +create_fn!(handler_33, 33); +create_fn!(handler_34, 34); +create_fn!(handler_35, 35); +create_fn!(handler_36, 36); +create_fn!(handler_37, 37); +create_fn!(handler_38, 38); +create_fn!(handler_39, 39); +create_fn!(handler_40, 40); +create_fn!(handler_41, 41); +create_fn!(handler_42, 42); +create_fn!(handler_43, 43); +create_fn!(handler_44, 44); +create_fn!(handler_45, 45); +create_fn!(handler_46, 46); +create_fn!(handler_47, 47); +create_fn!(handler_48, 48); +create_fn!(handler_49, 49); +create_fn!(handler_50, 50); +create_fn!(handler_51, 51); +create_fn!(handler_52, 52); +create_fn!(handler_53, 53); +create_fn!(handler_54, 54); +create_fn!(handler_55, 55); +create_fn!(handler_56, 56); +create_fn!(handler_57, 57); +create_fn!(handler_58, 58); +create_fn!(handler_59, 59); +create_fn!(handler_60, 60); +create_fn!(handler_61, 61); +create_fn!(handler_62, 62); +create_fn!(handler_63, 63); +create_fn!(handler_64, 64); +create_fn!(handler_65, 65); +create_fn!(handler_66, 66); +create_fn!(handler_67, 67); +create_fn!(handler_68, 68); +create_fn!(handler_69, 69); +create_fn!(handler_70, 70); +create_fn!(handler_71, 71); +create_fn!(handler_72, 72); +create_fn!(handler_73, 73); +create_fn!(handler_74, 74); +create_fn!(handler_75, 75); +create_fn!(handler_76, 76); +create_fn!(handler_77, 77); +create_fn!(handler_78, 78); +create_fn!(handler_79, 79); +create_fn!(handler_80, 80); +create_fn!(handler_81, 81); +create_fn!(handler_82, 82); +create_fn!(handler_83, 83); +create_fn!(handler_84, 84); +create_fn!(handler_85, 85); +create_fn!(handler_86, 86); +create_fn!(handler_87, 87); +create_fn!(handler_88, 88); +create_fn!(handler_89, 89); +create_fn!(handler_90, 90); +create_fn!(handler_91, 91); +create_fn!(handler_92, 92); +create_fn!(handler_93, 93); +create_fn!(handler_94, 94); +create_fn!(handler_95, 95); +create_fn!(handler_96, 96); +create_fn!(handler_97, 97); +create_fn!(handler_98, 98); +create_fn!(handler_99, 99); +create_fn!(handler_100, 100); +create_fn!(handler_101, 101); +create_fn!(handler_102, 102); +create_fn!(handler_103, 103); +create_fn!(handler_104, 104); +create_fn!(handler_105, 105); +create_fn!(handler_106, 106); +create_fn!(handler_107, 107); +create_fn!(handler_108, 108); +create_fn!(handler_109, 109); +create_fn!(handler_110, 110); +create_fn!(handler_111, 111); +create_fn!(handler_112, 112); +create_fn!(handler_113, 113); +create_fn!(handler_114, 114); +create_fn!(handler_115, 115); +create_fn!(handler_116, 116); +create_fn!(handler_117, 117); +create_fn!(handler_118, 118); +create_fn!(handler_119, 119); +create_fn!(handler_120, 120); +create_fn!(handler_121, 121); +create_fn!(handler_122, 122); +create_fn!(handler_123, 123); +create_fn!(handler_124, 124); 
+create_fn!(handler_125, 125); +create_fn!(handler_126, 126); +create_fn!(handler_127, 127); +create_fn!(handler_128, 128); +create_fn!(handler_129, 129); +create_fn!(handler_130, 130); +create_fn!(handler_131, 131); +create_fn!(handler_132, 132); +create_fn!(handler_133, 133); +create_fn!(handler_134, 134); +create_fn!(handler_135, 135); +create_fn!(handler_136, 136); +create_fn!(handler_137, 137); +create_fn!(handler_138, 138); +create_fn!(handler_139, 139); +create_fn!(handler_140, 140); +create_fn!(handler_141, 141); +create_fn!(handler_142, 142); +create_fn!(handler_143, 143); +create_fn!(handler_144, 144); +create_fn!(handler_145, 145); +create_fn!(handler_146, 146); +create_fn!(handler_147, 147); +create_fn!(handler_148, 148); +create_fn!(handler_149, 149); +create_fn!(handler_150, 150); +create_fn!(handler_151, 151); +create_fn!(handler_152, 152); +create_fn!(handler_153, 153); +create_fn!(handler_154, 154); +create_fn!(handler_155, 155); +create_fn!(handler_156, 156); +create_fn!(handler_157, 157); +create_fn!(handler_158, 158); +create_fn!(handler_159, 159); +create_fn!(handler_160, 160); +create_fn!(handler_161, 161); +create_fn!(handler_162, 162); +create_fn!(handler_163, 163); +create_fn!(handler_164, 164); +create_fn!(handler_165, 165); +create_fn!(handler_166, 166); +create_fn!(handler_167, 167); +create_fn!(handler_168, 168); +create_fn!(handler_169, 169); +create_fn!(handler_170, 170); +create_fn!(handler_171, 171); +create_fn!(handler_172, 172); +create_fn!(handler_173, 173); +create_fn!(handler_174, 174); +create_fn!(handler_175, 175); +create_fn!(handler_176, 176); +create_fn!(handler_177, 177); +create_fn!(handler_178, 178); +create_fn!(handler_179, 179); +create_fn!(handler_180, 180); +create_fn!(handler_181, 181); +create_fn!(handler_182, 182); +create_fn!(handler_183, 183); +create_fn!(handler_184, 184); +create_fn!(handler_185, 185); +create_fn!(handler_186, 186); +create_fn!(handler_187, 187); +create_fn!(handler_188, 188); +create_fn!(handler_189, 189); +create_fn!(handler_190, 190); +create_fn!(handler_191, 191); +create_fn!(handler_192, 192); +create_fn!(handler_193, 193); +create_fn!(handler_194, 194); +create_fn!(handler_195, 195); +create_fn!(handler_196, 196); +create_fn!(handler_197, 197); +create_fn!(handler_198, 198); +create_fn!(handler_199, 199); +create_fn!(handler_200, 200); +create_fn!(handler_201, 201); +create_fn!(handler_202, 202); +create_fn!(handler_203, 203); +create_fn!(handler_204, 204); +create_fn!(handler_205, 205); +create_fn!(handler_206, 206); +create_fn!(handler_207, 207); +create_fn!(handler_208, 208); +create_fn!(handler_209, 209); +create_fn!(handler_210, 210); +create_fn!(handler_211, 211); +create_fn!(handler_212, 212); +create_fn!(handler_213, 213); +create_fn!(handler_214, 214); +create_fn!(handler_215, 215); +create_fn!(handler_216, 216); +create_fn!(handler_217, 217); +create_fn!(handler_218, 218); +create_fn!(handler_219, 219); +create_fn!(handler_220, 220); +create_fn!(handler_221, 221); +create_fn!(handler_222, 222); +create_fn!(handler_223, 223); +create_fn!(handler_224, 224); +create_fn!(handler_225, 225); +create_fn!(handler_226, 226); +create_fn!(handler_227, 227); +create_fn!(handler_228, 228); +create_fn!(handler_229, 229); +create_fn!(handler_230, 230); +create_fn!(handler_231, 231); +create_fn!(handler_232, 232); +create_fn!(handler_233, 233); +create_fn!(handler_234, 234); +create_fn!(handler_235, 235); +create_fn!(handler_236, 236); +create_fn!(handler_237, 237); +create_fn!(handler_238, 238); 
+create_fn!(handler_239, 239);
+create_fn!(handler_240, 240);
+create_fn!(handler_241, 241);
+create_fn!(handler_242, 242);
+create_fn!(handler_243, 243);
+create_fn!(handler_244, 244);
+create_fn!(handler_245, 245);
+create_fn!(handler_246, 246);
+create_fn!(handler_247, 247);
+create_fn!(handler_248, 248);
+create_fn!(handler_249, 249);
+create_fn!(handler_250, 250);
+create_fn!(handler_251, 251);
+create_fn!(handler_252, 252);
+create_fn!(handler_253, 253);
+create_fn!(handler_254, 254);
+create_fn!(handler_255, 255);
diff --git a/opentmk/opentmk/src/arch/x86_64/mod.rs b/opentmk/opentmk/src/arch/x86_64/mod.rs
new file mode 100644
index 0000000000..81cead476e
--- /dev/null
+++ b/opentmk/opentmk/src/arch/x86_64/mod.rs
@@ -0,0 +1,4 @@
+pub mod hypercall;
+pub mod serial;
+pub mod interrupt;
+mod interrupt_handler_register;
\ No newline at end of file
diff --git a/opentmk/opentmk/src/arch/x86_64/serial.rs b/opentmk/opentmk/src/arch/x86_64/serial.rs
new file mode 100644
index 0000000000..bb16808b60
--- /dev/null
+++ b/opentmk/opentmk/src/arch/x86_64/serial.rs
@@ -0,0 +1,124 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+//! Serial output for debugging.
+
+use core::arch::asm;
+use core::fmt;
+use sync_nostd::Mutex;
+
+const COM4: u16 = 0x2E8;
+static MUTEX: Mutex<()> = Mutex::new(());
+
+/// Write a byte to a port.
+///
+/// # Safety
+///
+/// The caller must be sure that the given port is safe to write to, and that the
+/// given value is safe for it.
+unsafe fn outb(port: u16, data: u8) {
+    // SAFETY: The caller has assured us this is safe.
+    unsafe {
+        asm! {
+            "out dx, al",
+            in("dx") port,
+            in("al") data,
+        }
+    }
+}
+
+/// Read a byte from a port.
+///
+/// # Safety
+///
+/// The caller must be sure that the given port is safe to read from.
+unsafe fn inb(port: u16) -> u8 {
+    let mut data;
+    // SAFETY: The caller has assured us this is safe.
+    unsafe {
+        asm! {
+            "in al, dx",
+            in("dx") port,
+            out("al") data,
+        }
+    }
+    data
+}
+
+/// A trait to access io ports used by the serial device.
+pub trait IoAccess {
+    /// Issue an in byte instruction.
+    ///
+    /// # Safety
+    ///
+    /// The caller must be sure that the given port is safe to read from.
+    unsafe fn inb(&self, port: u16) -> u8;
+    /// Issue an out byte instruction.
+    ///
+    /// # Safety
+    ///
+    /// The caller must be sure that the given port is safe to write to, and that the
+    /// given value is safe for it.
+    unsafe fn outb(&self, port: u16, data: u8);
+}
+
+/// A struct to access io ports using in/out instructions.
+pub struct InstrIoAccess;
+
+impl IoAccess for InstrIoAccess {
+    unsafe fn inb(&self, port: u16) -> u8 {
+        // SAFETY: The serial port caller has specified a valid port.
+        unsafe { inb(port) }
+    }
+
+    unsafe fn outb(&self, port: u16, data: u8) {
+        // SAFETY: The serial port caller has specified a valid port and data.
+        unsafe { outb(port, data) }
+    }
+}
+
+/// A writer for the COM4 UART.
+pub struct Serial<T: IoAccess> {
+    io: T,
+}
+
+impl<T: IoAccess> Serial<T> {
+    /// Initialize the serial port.
+    pub fn init(io: T) -> Self {
+        // SAFETY: Writing these values to the serial device is safe.
+        unsafe {
+            io.outb(COM4 + 1, 0x00); // Disable all interrupts
+            io.outb(COM4 + 2, 0xC7); // Enable FIFO, clear them, with 14-byte threshold
+            io.outb(COM4 + 4, 0x0F);
+        }
+
+        Self { io }
+    }
+
+    /// Create an instance without calling init.
+    pub const fn new(io: T) -> Self {
+        Self { io }
+    }
+
+    fn write_byte(&self, b: u8) {
+        // SAFETY: Reading and writing text to the serial device is safe.
+        unsafe {
+            // Wait for the transmit holding register to drain.
+            while self.io.inb(COM4 + 5) & 0x20 == 0 {}
+            self.io.outb(COM4, b);
+        }
+    }
+}
+
+impl<T: IoAccess> fmt::Write for Serial<T> {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        let _guard = MUTEX.lock();
+        for &b in s.as_bytes() {
+            if b == b'\n' {
+                self.write_byte(b'\r');
+            }
+            self.write_byte(b);
+        }
+        Ok(())
+    }
+}
diff --git a/opentmk/opentmk/src/context.rs b/opentmk/opentmk/src/context.rs
new file mode 100644
index 0000000000..0962b81a99
--- /dev/null
+++ b/opentmk/opentmk/src/context.rs
@@ -0,0 +1,130 @@
+#![allow(dead_code)]
+use core::ops::Range;
+
+use alloc::boxed::Box;
+use hvdef::Vtl;
+
+use crate::tmkdefs::TmkResult;
+
+pub trait SecureInterceptPlatformTrait {
+    /// Installs a secure-world intercept for the given interrupt.
+    ///
+    /// The platform must arrange that the supplied `interrupt_idx`
+    /// triggers a VM exit or any other mechanism that transfers control
+    /// to the TMK secure handler.
+    ///
+    /// Returns `Ok(())` on success or an error wrapped in `TmkResult`.
+    fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()>;
+}
+
+pub trait InterruptPlatformTrait {
+    /// Associates an interrupt vector with a handler inside the
+    /// non-secure world.
+    ///
+    /// * `interrupt_idx` – IDT/GIC index to program
+    /// * `handler` – Function that will be executed when the interrupt
+    ///   fires.
+    fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()>;
+
+    /// Finalizes platform-specific interrupt setup (enables the table,
+    /// unmasks lines, etc.).
+    fn setup_interrupt_handler(&mut self) -> TmkResult<()>;
+}
+
+pub trait MsrPlatformTrait {
+    /// Reads the content of `msr`.
+    ///
+    /// Returns the 64-bit value currently stored in that MSR.
+    fn read_msr(&mut self, msr: u32) -> TmkResult<u64>;
+
+    /// Writes `value` into `msr`.
+    fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()>;
+}
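`MsrPlatformTrait` maps directly onto the raw MSR helpers this PR already uses in `arch/x86_64/hypercall.rs`. A minimal sketch of an x86-64 implementation, assuming `TmkResult` behaves like a conventional `Result` and using a hypothetical `TestPlatform` type (both assumptions are illustrative, not part of the patch):

```rust
use minimal_rt::arch::msr::{read_msr, write_msr};

struct TestPlatform; // hypothetical platform type, for illustration only

impl MsrPlatformTrait for TestPlatform {
    fn read_msr(&mut self, msr: u32) -> TmkResult<u64> {
        // SAFETY: callers pick an MSR that is valid to read on this core.
        Ok(unsafe { read_msr(msr) })
    }

    fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()> {
        // SAFETY: callers pick an MSR/value pair that is valid to write.
        unsafe { write_msr(msr, value) };
        Ok(())
    }
}
```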
+pub trait VirtualProcessorPlatformTrait<T>
+where
+    T: VtlPlatformTrait,
+{
+    /// Returns the index of the virtual CPU currently executing this
+    /// code.
+    fn get_current_vp(&self) -> TmkResult<u32>;
+
+    /// Reads the architecture-specific register identified by `reg`.
+    fn get_register(&mut self, reg: u32) -> TmkResult<u128>;
+
+    /// Total number of online VPs in the partition.
+    fn get_vp_count(&self) -> TmkResult<u32>;
+
+    /// Queues `cmd` to run later on the VP described inside the
+    /// `VpExecutor`.
+    fn queue_command_vp(&mut self, cmd: VpExecutor<T>) -> TmkResult<()>;
+
+    /// Synchronously executes `cmd` on its target VP.
+    fn start_on_vp(&mut self, cmd: VpExecutor<T>) -> TmkResult<()>;
+
+    /// Starts the target VP (if required) and executes `cmd` with a
+    /// platform-provided default VTL context.
+    fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor<T>) -> TmkResult<()>;
+}
+
+pub trait VtlPlatformTrait {
+    /// Applies VTL protection to the supplied physical address range.
+    fn apply_vtl_protection_for_memory(&mut self, range: Range<u64>, vtl: Vtl) -> TmkResult<()>;
+
+    /// Enables the given `vtl` on `vp_index` with a default context.
+    fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>;
+
+    /// Returns the VTL level the caller is currently executing in.
+    fn get_current_vtl(&self) -> TmkResult<Vtl>;
+
+    /// Sets the default VTL context on `vp_index`.
+    fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>;
+
+    /// Performs partition-wide initialization for a given `vtl`.
+    fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()>;
+
+    /// Platform-specific global VTL preparation (stage 2 translation,
+    /// EPT, etc.).
+    fn setup_vtl_protection(&mut self) -> TmkResult<()>;
+
+    /// Switches the current hardware thread to the higher privileged VTL.
+    fn switch_to_high_vtl(&mut self);
+
+    /// Switches the current hardware thread back to the lower privileged VTL.
+    fn switch_to_low_vtl(&mut self);
+}
+
+pub trait X64PlatformTrait {}
+pub trait Aarch64PlatformTrait {}
+
+pub struct VpExecutor<T> {
+    vp_index: u32,
+    vtl: Vtl,
+    cmd: Option<Box<dyn FnOnce(&mut T) + 'static>>,
+}
+
+impl<T> VpExecutor<T> {
+    /// Creates a new executor targeting `vp_index` running in `vtl`.
+    pub fn new(vp_index: u32, vtl: Vtl) -> Self {
+        VpExecutor {
+            vp_index,
+            vtl,
+            cmd: None,
+        }
+    }
+
+    /// Stores a closure `cmd` that will be executed on the target VP.
+    ///
+    /// The closure receives a mutable reference to the platform-specific
+    /// type `T` that implements `VtlPlatformTrait`.
+    pub fn command(mut self, cmd: impl FnOnce(&mut T) + 'static) -> Self {
+        self.cmd = Some(Box::new(cmd));
+        self
+    }
+
+    /// Extracts the tuple `(vp_index, vtl, cmd)`, consuming `self`.
+    pub fn get(mut self) -> (u32, Vtl, Option<Box<dyn FnOnce(&mut T) + 'static>>) {
+        let cmd = self.cmd.take();
+        (self.vp_index, self.vtl, cmd)
+    }
+}
\ No newline at end of file
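Taken together, the traits and `VpExecutor` give tests a small remote-execution API. A hedged sketch of how a test might push work onto another VP (`P` stands in for whatever platform type implements these traits; the closure body is illustrative):

```rust
// Sketch: queue a closure onto VP 1 in VTL0 and run it synchronously.
fn run_on_vp1<P>(platform: &mut P) -> TmkResult<()>
where
    P: VirtualProcessorPlatformTrait<P> + VtlPlatformTrait,
{
    let cmd = VpExecutor::new(1, Vtl::Vtl0).command(|p: &mut P| {
        // Runs on VP 1; the closure gets the same platform API.
        let _ = p.get_current_vtl();
    });
    platform.start_on_vp(cmd)
}
```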
diff --git a/opentmk/opentmk/src/hypercall.rs b/opentmk/opentmk/src/hypercall.rs
new file mode 100644
index 0000000000..07cc09b4b2
--- /dev/null
+++ b/opentmk/opentmk/src/hypercall.rs
@@ -0,0 +1,750 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+//! Hypercall infrastructure.
+
+#![allow(dead_code)]
+use arrayvec::ArrayVec;
+use core::mem::size_of;
+use core::sync::atomic::AtomicBool;
+use core::sync::atomic::Ordering;
+use hvdef::hypercall::EnablePartitionVtlFlags;
+use hvdef::hypercall::HvInputVtl;
+use hvdef::hypercall::InitialVpContextX64;
+use hvdef::HvRegisterValue;
+use hvdef::HvRegisterVsmPartitionConfig;
+use hvdef::HvX64RegisterName;
+use hvdef::Vtl;
+use hvdef::HV_PAGE_SIZE;
+use memory_range::MemoryRange;
+use minimal_rt::arch::hypercall::{invoke_hypercall, HYPERCALL_PAGE};
+use zerocopy::FromBytes;
+use zerocopy::IntoBytes;
+
+/// Page-aligned, page-sized buffer for use with hypercalls.
+#[repr(C, align(4096))]
+struct HvcallPage {
+    buffer: [u8; HV_PAGE_SIZE as usize],
+}
+
+/// Invokes a hypercall that carries no input or output pages, such as
+/// HvCallVtlCall and HvCallVtlReturn.
+pub fn invoke_hypercall_vtl(control: hvdef::hypercall::Control) {
+    // SAFETY: the caller guarantees the safety of this operation.
+    unsafe {
+        core::arch::asm! {
+            "call {hypercall_page}",
+            hypercall_page = sym HYPERCALL_PAGE,
+            inout("rcx") u64::from(control) => _,
+            in("rdx") 0,
+            in("rax") 0,
+        }
+    }
+}
+
+impl HvcallPage {
+    pub const fn new() -> Self {
+        HvcallPage {
+            buffer: [0; HV_PAGE_SIZE as usize],
+        }
+    }
+
+    /// Address of the hypercall page.
+    fn address(&self) -> u64 {
+        let addr = self.buffer.as_ptr() as u64;
+
+        // These should be page-aligned.
+        assert!(addr % HV_PAGE_SIZE == 0);
+
+        addr
+    }
+}
+
+/// Provides mechanisms to invoke hypercalls within the boot shim.
+///
+/// This module defines the `HvCall` struct and associated methods to interact with
+/// hypervisor functionality through hypercalls. It includes utilities for managing
+/// hypercall pages, setting and getting virtual processor (VP) registers, enabling
+/// VTLs (Virtual Trust Levels), and applying memory protections.
+///
+/// # Overview
+///
+/// - **Hypercall Pages**: Manages page-aligned buffers for hypercall input and output.
+/// - **VP Registers**: Provides methods to set and get VP registers.
+/// - **VTL Management**: Includes methods to enable VTLs, apply VTL protections, and
+///   manage VTL-specific operations.
+/// - **Memory Protections**: Supports applying VTL protections and accepting VTL2 pages.
+///
+/// # Safety
+///
+/// Many methods in this module involve unsafe operations, such as invoking hypercalls
+/// or interacting with low-level memory structures. The caller must ensure the safety
+/// of these operations by adhering to the requirements of the hypervisor and the
+/// underlying architecture.
+///
+/// # Usage
+///
+/// This struct is designed for use in single-threaded environments, such as the boot
+/// shim. It uses static buffers for the hypercall page, so it is not thread-safe.
+///
+/// # Features
+///
+/// - **Architecture-Specific Implementations**: Some methods are only available for
+///   specific architectures (e.g., `x86_64` or `aarch64`).
+/// - **Error Handling**: Methods return `Result` types to surface hypervisor errors.
+///
+/// # Examples
+///
+/// ```ignore
+/// let mut hv_call = HvCall::new();
+/// hv_call.initialize();
+/// let vtl = hv_call.vtl();
+/// log::info!("Current VTL: {:?}", vtl);
+/// hv_call.uninitialize();
+/// ```
+///
+/// # Modules and Types
+///
+/// - `HvCall`: Main struct for managing hypercalls.
+/// - `HvcallPage`: Struct for page-aligned buffers.
+/// - `HwId`: Type alias for hardware IDs (APIC ID on `x86_64`, MPIDR on `aarch64`).
+///
+/// # Notes
+///
+/// - This module assumes the presence of a hypervisor that supports the required
+///   hypercalls.
+/// - The boot shim must ensure that hypercalls are invoked in a valid context.
+/// - Static buffers are used for the hypercall page, the input page, and the
+///   output page, so this should not be used in any multi-threaded capacity
+///   (which the boot shim currently is not).
+pub struct HvCall {
+    input_page: HvcallPage,
+    output_page: HvcallPage,
+}
+
+static HV_PAGE_INIT_STATUS: AtomicBool = AtomicBool::new(false);
+
+#[expect(unsafe_code)]
+impl HvCall {
+    /// Hypercall to accept vtl2 pages from address start to end with VTL 2
+    /// protections and no host visibility.
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
+    pub fn accept_vtl2_pages(
+        &mut self,
+        range: MemoryRange,
+        memory_type: hvdef::hypercall::AcceptMemoryType,
+    ) -> Result<(), hvdef::HvError> {
+        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::AcceptGpaPages>();
+        const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::<u64>();
+
+        let mut current_page = range.start_4k_gpn();
+        while current_page < range.end_4k_gpn() {
+            let header = hvdef::hypercall::AcceptGpaPages {
+                partition_id: hvdef::HV_PARTITION_ID_SELF,
+                page_attributes: hvdef::hypercall::AcceptPagesAttributes::new()
+                    .with_memory_type(memory_type.0)
+                    .with_host_visibility(hvdef::hypercall::HostVisibilityType::PRIVATE) // no host visibility
+                    .with_vtl_set(1 << 2), // applies vtl permissions for vtl 2
+                vtl_permission_set: hvdef::hypercall::VtlPermissionSet {
+                    vtl_permission_from_1: [0; hvdef::hypercall::HV_VTL_PERMISSION_SET_SIZE],
+                },
+                gpa_page_base: current_page,
+            };
+
+            let remaining_pages = range.end_4k_gpn() - current_page;
+            let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64);
+
+            let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+
+            let output = self.dispatch_hvcall(
+                hvdef::HypercallCode::HvCallAcceptGpaPages,
+                Some(count as usize),
+            );
+
+            output.result()?;
+
+            current_page += count;
+        }
+
+        Ok(())
+    }
+
+    /// Hypercall to apply vtl protections to the pages from address start to end.
+    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
+    pub fn apply_vtl2_protections(&mut self, range: MemoryRange) -> Result<(), hvdef::HvError> {
+        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::ModifyVtlProtectionMask>();
+        const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::<u64>();
+
+        let header = hvdef::hypercall::ModifyVtlProtectionMask {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE,
+            target_vtl: HvInputVtl::CURRENT_VTL,
+            reserved: [0; 3],
+        };
+
+        let mut current_page = range.start_4k_gpn();
+        while current_page < range.end_4k_gpn() {
+            let remaining_pages = range.end_4k_gpn() - current_page;
+            let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64);
+
+            let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+
+            let mut input_offset = HEADER_SIZE;
+            for i in 0..count {
+                let page_num = current_page + i;
+                let _ = page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]);
+                input_offset += size_of::<u64>();
+            }
+
+            let output = self.dispatch_hvcall(
+                hvdef::HypercallCode::HvCallModifyVtlProtectionMask,
+                Some(count as usize),
+            );
+
+            output.result()?;
+
+            current_page += count;
+        }
+
+        Ok(())
+    }
+
+    /// Hypercall to apply vtl protections to the pages from address start to end.
+    #[cfg_attr(target_arch = "x86_64", allow(dead_code))]
+    pub fn apply_vtl_protections(
+        &mut self,
+        range: MemoryRange,
+        vtl: Vtl,
+    ) -> Result<(), hvdef::HvError> {
+        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::ModifyVtlProtectionMask>();
+        const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::<u64>();
+
+        let header = hvdef::hypercall::ModifyVtlProtectionMask {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE,
+            target_vtl: HvInputVtl::new()
+                .with_target_vtl_value(vtl.into())
+                .with_use_target_vtl(true),
+            reserved: [0; 3],
+        };
+
+        let mut current_page = range.start_4k_gpn();
+        while current_page < range.end_4k_gpn() {
+            let remaining_pages = range.end_4k_gpn() - current_page;
+            let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64);
+
+            let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+
+            let mut input_offset = HEADER_SIZE;
+            for i in 0..count {
+                let page_num = current_page + i;
+                let _ = page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]);
+                input_offset += size_of::<u64>();
+            }
+
+            let output = self.dispatch_hvcall(
+                hvdef::HypercallCode::HvCallModifyVtlProtectionMask,
+                Some(count as usize),
+            );
+
+            output.result()?;
+
+            current_page += count;
+        }
+
+        Ok(())
+    }
+
+    /// Makes a hypercall.
+    /// `rep_count` is `Some` for rep hypercalls.
+    fn dispatch_hvcall(
+        &mut self,
+        code: hvdef::HypercallCode,
+        rep_count: Option<usize>,
+    ) -> hvdef::hypercall::HypercallOutput {
+        let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new()
+            .with_code(code.0)
+            .with_rep_count(rep_count.unwrap_or_default());
+
+        // SAFETY: Invoking hypercall per the TLFS spec.
+        unsafe {
+            invoke_hypercall(
+                control,
+                self.input_page().address(),
+                self.output_page().address(),
+            )
+        }
+    }
+
+    /// Enables a VTL for the specified partition.
+    pub fn enable_partition_vtl(
+        &mut self,
+        partition_id: u64,
+        target_vtl: Vtl,
+    ) -> Result<(), hvdef::HvError> {
+        let flags: EnablePartitionVtlFlags = EnablePartitionVtlFlags::new()
+            .with_enable_mbec(false)
+            .with_enable_supervisor_shadow_stack(false);
+
+        let header = hvdef::hypercall::EnablePartitionVtl {
+            partition_id,
+            target_vtl: target_vtl.into(),
+            flags,
+            reserved_z0: 0,
+            reserved_z1: 0,
+        };
+
+        let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+
+        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnablePartitionVtl, None);
+        match output.result() {
+            Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()),
+            err => err,
+        }
+    }
+
+    /// Enables VTL protection for the specified VTL.
+    pub fn enable_vtl_protection(&mut self, vtl: HvInputVtl) -> Result<(), hvdef::HvError> {
+        let hvreg = self.get_register(HvX64RegisterName::VsmPartitionConfig.into(), Some(vtl))?;
+        let mut hvreg: HvRegisterVsmPartitionConfig =
+            HvRegisterVsmPartitionConfig::from_bits(hvreg.as_u64());
+        hvreg.set_enable_vtl_protection(true);
+        // hvreg.set_intercept_page(true);
+        // hvreg.set_default_vtl_protection_mask(0b11);
+        // hvreg.set_intercept_enable_vtl_protection(true);
+        let bits = hvreg.into_bits();
+        let new_value = HvRegisterValue::from(bits);
+        self.set_register(
+            HvX64RegisterName::VsmPartitionConfig.into(),
+            new_value,
+            Some(vtl),
+        )
+    }
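For orientation, these enable/protect methods imply a particular bring-up order. A hedged sketch of that sequence (x86-64 shown; the VP index and the `None` context are illustrative, and error handling is elided):

```rust
// Sketch: enable VTL1 partition-wide, then per-VP, then turn on protections.
fn bring_up_vtl1(hv: &mut HvCall) -> Result<(), hvdef::HvError> {
    hv.enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, Vtl::Vtl1)?;
    hv.enable_vp_vtl(0, Vtl::Vtl1, None)?; // defined just below
    hv.enable_vtl_protection(HvInputVtl::CURRENT_VTL)?;
    Ok(())
}
```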
+    #[cfg(target_arch = "x86_64")]
+    /// Enables a VTL for a specific virtual processor (VP) on x86_64.
+    pub fn enable_vp_vtl(
+        &mut self,
+        vp_index: u32,
+        target_vtl: Vtl,
+        vp_context: Option<InitialVpContextX64>,
+    ) -> Result<(), hvdef::HvError> {
+        let header = hvdef::hypercall::EnableVpVtlX64 {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            vp_index,
+            target_vtl: target_vtl.into(),
+            reserved: [0; 3],
+            vp_vtl_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()),
+        };
+
+        header
+            .write_to_prefix(self.input_page().buffer.as_mut_slice())
+            .expect("size of enable_vp_vtl header is not correct");
+
+        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None);
+        match output.result() {
+            Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()),
+            err => err,
+        }
+    }
+
+    /// Hypercall to enable VP VTL.
+    #[cfg(target_arch = "aarch64")]
+    pub fn enable_vp_vtl(&mut self, vp_index: u32) -> Result<(), hvdef::HvError> {
+        let header = hvdef::hypercall::EnableVpVtlArm64 {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            vp_index,
+            // The VTL value here is just a u8 and not the otherwise usual
+            // HvInputVtl value.
+            target_vtl: Vtl::Vtl2.into(),
+            reserved: [0; 3],
+            vp_vtl_context: zerocopy::FromZeros::new_zeroed(),
+        };
+
+        let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+
+        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None);
+        match output.result() {
+            Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()),
+            err => err,
+        }
+    }
+
+    #[cfg(target_arch = "x86_64")]
+    /// Hypercall to get the current VTL VP context.
+    pub fn get_current_vtl_vp_context(&mut self) -> Result<InitialVpContextX64, hvdef::HvError> {
+        use zerocopy::FromZeros;
+        let mut context: InitialVpContextX64 = FromZeros::new_zeroed();
+        context.cr0 = self
+            .get_register(HvX64RegisterName::Cr0.into(), None)?
+            .as_u64();
+        context.cr3 = self
+            .get_register(HvX64RegisterName::Cr3.into(), None)?
+            .as_u64();
+        context.cr4 = self
+            .get_register(HvX64RegisterName::Cr4.into(), None)?
+            .as_u64();
+        context.rip = self
+            .get_register(HvX64RegisterName::Rip.into(), None)?
+            .as_u64();
+        context.rsp = self
+            .get_register(HvX64RegisterName::Rsp.into(), None)?
+            .as_u64();
+        context.rflags = self
+            .get_register(HvX64RegisterName::Rflags.into(), None)?
+            .as_u64();
+        context.cs = self
+            .get_register(HvX64RegisterName::Cs.into(), None)?
+            .as_segment();
+        context.ss = self
+            .get_register(HvX64RegisterName::Ss.into(), None)?
+            .as_segment();
+        context.ds = self
+            .get_register(HvX64RegisterName::Ds.into(), None)?
+            .as_segment();
+        context.es = self
+            .get_register(HvX64RegisterName::Es.into(), None)?
+            .as_segment();
+        context.fs = self
+            .get_register(HvX64RegisterName::Fs.into(), None)?
+            .as_segment();
+        context.gs = self
+            .get_register(HvX64RegisterName::Gs.into(), None)?
+            .as_segment();
+        context.gdtr = self
+            .get_register(HvX64RegisterName::Gdtr.into(), None)?
+            .as_table();
+        context.idtr = self
+            .get_register(HvX64RegisterName::Idtr.into(), None)?
+            .as_table();
+        context.tr = self
+            .get_register(HvX64RegisterName::Tr.into(), None)?
+            .as_segment();
+        context.efer = self
+            .get_register(HvX64RegisterName::Efer.into(), None)?
+            .as_u64();
+        Ok(context)
+    }
+
+    /// Hypercall for getting a register's value.
+    pub fn get_register(
+        &mut self,
+        name: hvdef::HvRegisterName,
+        vtl: Option<HvInputVtl>,
+    ) -> Result<HvRegisterValue, hvdef::HvError> {
+        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::GetSetVpRegisters>();
+
+        let header = hvdef::hypercall::GetSetVpRegisters {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            vp_index: hvdef::HV_VP_INDEX_SELF,
+            target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL),
+            rsvd: [0; 3],
+        };
+
+        let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+        let _ = name.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]);
+
+        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallGetVpRegisters, Some(1));
+        output.result()?;
+        let value = HvRegisterValue::read_from_prefix(&self.output_page().buffer).unwrap();
+
+        Ok(value.0)
+    }
+
+    /// Get the corresponding VP indices from a list of VP hardware IDs (APIC
+    /// IDs on x64, MPIDR on ARM64).
+    ///
+    /// This always queries VTL0, since the hardware IDs are the same across the
+    /// VTLs in practice, and the hypercall only succeeds for VTL2 once VTL2 has
+    /// been enabled (which it might not be at this point).
+    pub fn get_vp_index_from_hw_id<const N: usize>(
+        &mut self,
+        hw_ids: &[HwId],
+        output: &mut ArrayVec<u32, N>,
+    ) -> Result<(), hvdef::HvError> {
+        let header = hvdef::hypercall::GetVpIndexFromApicId {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            target_vtl: 0,
+            reserved: [0; 7],
+        };
+
+        // Split the call up to avoid exceeding the hypercall input/output size limits.
+        const MAX_PER_CALL: usize = 512;
+
+        for hw_ids in hw_ids.chunks(MAX_PER_CALL) {
+            let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+            let _ =
+                hw_ids.write_to_prefix(&mut self.input_page().buffer[header.as_bytes().len()..]);
+
+            // The input header and rep slice are the correct types for this hypercall.
+            // The hypercall output is validated right after the hypercall is issued.
+            let r = self.dispatch_hvcall(
+                hvdef::HypercallCode::HvCallGetVpIndexFromApicId,
+                Some(hw_ids.len()),
+            );
+
+            let n = r.elements_processed() as usize;
+
+            output.extend(
+                <[u32]>::ref_from_bytes(&mut self.output_page().buffer[..n * 4])
+                    .unwrap()
+                    .iter()
+                    .copied(),
+            );
+            r.result()?;
+            assert_eq!(n, hw_ids.len());
+        }
+
+        Ok(())
+    }
+
+    /// Initializes the hypercall interface.
+    pub fn initialize(&mut self) {
+        let init = HV_PAGE_INIT_STATUS.load(Ordering::SeqCst);
+        if init {
+            return;
+        }
+        // TODO: revisit os id value. For now, use 1 (which is what UEFI does).
+        let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1);
+        crate::arch::hypercall::initialize(guest_os_id.into());
+
+        HV_PAGE_INIT_STATUS.store(true, Ordering::SeqCst);
+    }
+
+    /// Returns a mutable reference to the hypercall input page.
+    fn input_page(&mut self) -> &mut HvcallPage {
+        &mut self.input_page
+    }
+
+    /// Creates a new `HvCall` instance.
+    pub const fn new() -> Self {
+        HvCall {
+            input_page: HvcallPage::new(),
+            output_page: HvcallPage::new(),
+        }
+    }
+
+    /// Returns a mutable reference to the hypercall output page.
+    fn output_page(&mut self) -> &mut HvcallPage {
+        &mut self.output_page
+    }
+
+    /// Hypercall for setting a register to a value.
+    /// Hypercall for setting a register to a value.
+    pub fn set_register(
+        &mut self,
+        name: hvdef::HvRegisterName,
+        value: HvRegisterValue,
+        vtl: Option<HvInputVtl>,
+    ) -> Result<(), hvdef::HvError> {
+        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::GetSetVpRegisters>();
+
+        let header = hvdef::hypercall::GetSetVpRegisters {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            vp_index: hvdef::HV_VP_INDEX_SELF,
+            target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL),
+            rsvd: [0; 3],
+        };
+
+        let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+
+        let reg = hvdef::hypercall::HvRegisterAssoc {
+            name,
+            pad: Default::default(),
+            value,
+        };
+
+        let _ = reg.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]);
+
+        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(1));
+
+        output.result()
+    }
+
+    /// Sets multiple virtual processor (VP) registers for a given VP and VTL
+    /// from a full [`InitialVpContextX64`]. Panics if `vp_context` is `None`.
+    pub fn set_vp_registers(
+        &mut self,
+        vp: u32,
+        vtl: Option<HvInputVtl>,
+        vp_context: Option<InitialVpContextX64>,
+    ) -> Result<(), hvdef::HvError> {
+        const HEADER_SIZE: usize = size_of::<hvdef::hypercall::GetSetVpRegisters>();
+
+        let header = hvdef::hypercall::GetSetVpRegisters {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            vp_index: vp,
+            target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL),
+            rsvd: [0; 3],
+        };
+
+        let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice());
+
+        let mut input_offset = HEADER_SIZE;
+        let mut count = 0;
+        let mut write_reg = |reg_name: hvdef::HvRegisterName, reg_value: HvRegisterValue| {
+            let reg = hvdef::hypercall::HvRegisterAssoc {
+                name: reg_name,
+                pad: Default::default(),
+                value: reg_value,
+            };
+
+            let _ = reg.write_to_prefix(&mut self.input_page().buffer[input_offset..]);
+
+            input_offset += size_of::<hvdef::hypercall::HvRegisterAssoc>();
+            count += 1;
+        };
+
+        // Unwrap once instead of per register.
+        let vp_context = vp_context.expect("set_vp_registers requires a full vp_context");
+        write_reg(HvX64RegisterName::Cr0.into(), vp_context.cr0.into());
+        write_reg(HvX64RegisterName::Cr3.into(), vp_context.cr3.into());
+        write_reg(HvX64RegisterName::Cr4.into(), vp_context.cr4.into());
+        write_reg(HvX64RegisterName::Rip.into(), vp_context.rip.into());
+        write_reg(HvX64RegisterName::Rsp.into(), vp_context.rsp.into());
+        write_reg(HvX64RegisterName::Rflags.into(), vp_context.rflags.into());
+        write_reg(HvX64RegisterName::Cs.into(), vp_context.cs.into());
+        write_reg(HvX64RegisterName::Ss.into(), vp_context.ss.into());
+        write_reg(HvX64RegisterName::Ds.into(), vp_context.ds.into());
+        write_reg(HvX64RegisterName::Es.into(), vp_context.es.into());
+        write_reg(HvX64RegisterName::Fs.into(), vp_context.fs.into());
+        write_reg(HvX64RegisterName::Gs.into(), vp_context.gs.into());
+        write_reg(HvX64RegisterName::Gdtr.into(), vp_context.gdtr.into());
+        write_reg(HvX64RegisterName::Idtr.into(), vp_context.idtr.into());
+        write_reg(HvX64RegisterName::Ldtr.into(), vp_context.ldtr.into());
+        write_reg(HvX64RegisterName::Tr.into(), vp_context.tr.into());
+        write_reg(HvX64RegisterName::Efer.into(), vp_context.efer.into());
+
+        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(count));
+
+        output.result()
+    }
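+    // Illustrative only (editor's sketch): seed another VP's state from the
+    // captured context of the current VP; `hvcall` and `vp_index` are
+    // hypothetical:
+    //
+    //     let ctx = hvcall.get_current_vtl_vp_context()?;
+    //     hvcall.set_vp_registers(vp_index, Some(HvInputVtl::CURRENT_VTL), Some(ctx))?;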
+    #[cfg(target_arch = "x86_64")]
+    /// Starts a virtual processor (VP) with the specified VTL and context on x86_64.
+    pub fn start_virtual_processor(
+        &mut self,
+        vp_index: u32,
+        target_vtl: Vtl,
+        vp_context: Option<InitialVpContextX64>,
+    ) -> Result<(), hvdef::HvError> {
+        let header = hvdef::hypercall::StartVirtualProcessorX64 {
+            partition_id: hvdef::HV_PARTITION_ID_SELF,
+            vp_index,
+            target_vtl: target_vtl.into(),
+            vp_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()),
+            rsvd0: 0u8,
+            rsvd1: 0u16,
+        };
+
+        header
+            .write_to_prefix(self.input_page().buffer.as_mut_slice())
+            .expect("size of start_virtual_processor header is not correct");
+
+        let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None);
+        // Propagate the error instead of panicking so callers can surface it
+        // as a test failure.
+        output.result()
+    }
+
+    /// Call before jumping to kernel.
+    pub fn uninitialize(&mut self) {
+        let init = HV_PAGE_INIT_STATUS.load(Ordering::SeqCst);
+        if init {
+            crate::arch::hypercall::uninitialize();
+            HV_PAGE_INIT_STATUS.store(false, Ordering::SeqCst);
+        }
+    }
+
+    /// Returns the environment's VTL.
+    pub fn vtl(&mut self) -> Vtl {
+        self.get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None)
+            .map_or(Vtl::Vtl0, |status| {
+                hvdef::HvRegisterVsmVpStatus::from(status.as_u64())
+                    .active_vtl()
+                    .try_into()
+                    .unwrap()
+            })
+    }
+
+    /// Invokes the HvCallVtlCall hypercall.
+    pub fn vtl_call() {
+        let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new()
+            .with_code(hvdef::HypercallCode::HvCallVtlCall.0)
+            .with_rep_count(0);
+        invoke_hypercall_vtl(control);
+    }
+
+    /// Invokes the HvCallVtlReturn hypercall.
+    pub fn vtl_return() {
+        let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new()
+            .with_code(hvdef::HypercallCode::HvCallVtlReturn.0)
+            .with_rep_count(0);
+        invoke_hypercall_vtl(control);
+    }
+}
+
+/// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is the
+/// APIC ID on x64.
+#[cfg(target_arch = "x86_64")]
+pub type HwId = u32;
+
+/// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is the
+/// MPIDR on ARM64.
+#[cfg(target_arch = "aarch64")]
+pub type HwId = u64;
diff --git a/opentmk/opentmk/src/main.rs b/opentmk/opentmk/src/main.rs
new file mode 100644
index 0000000000..dc331ea99d
--- /dev/null
+++ b/opentmk/opentmk/src/main.rs
@@ -0,0 +1,26 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+#![no_std]
+#![allow(unsafe_code)]
+#![feature(abi_x86_interrupt)]
+#![feature(naked_functions)]
+#![doc = include_str!("../README.md")]
+// The crate is already unconditionally `no_std`; only `no_main` needs to be
+// conditional on the UEFI target.
+#![cfg_attr(all(not(test), target_os = "uefi"), no_main)]
+
+// Actual entrypoint is `uefi::uefi_main`, via the `#[entry]` macro
+#[cfg(any(test, not(target_os = "uefi")))]
+fn main() {}
+
+#[macro_use]
+extern crate alloc;
+
+mod uefi;
+pub mod arch;
+pub mod tmk_assert;
+pub mod tmk_logger;
+pub mod hypercall;
+pub mod context;
+pub mod tmkdefs;
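Both output modules added below emit one JSON object per line over serial, which keeps the stream machine-parsable. Illustrative records (the field names follow the `LogEntry` and `AssertJson` serializers; the values are made up):

{"log_type":"log","level":"INFO","message":"vp: 0","line":"opentmk/opentmk/src/uefi/tests/hv_processor.rs:31"}
{"type":"assert","level":"WARN","message":"vp == 0","line":"opentmk/opentmk/src/uefi/tests/hv_processor.rs:33","assertion_result":true,"testname":"vp should be equal to 0"}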
diff --git a/opentmk/opentmk/src/tmk_assert.rs b/opentmk/opentmk/src/tmk_assert.rs
new file mode 100644
index 0000000000..d4eb9f4433
--- /dev/null
+++ b/opentmk/opentmk/src/tmk_assert.rs
@@ -0,0 +1,81 @@
+use alloc::string::{String, ToString};
+use core::fmt::Write;
+use serde::Serialize;
+
+#[derive(Serialize)]
+struct AssertJson<'a, T>
+where
+    T: Serialize,
+{
+    #[serde(rename = "type")]
+    type_: &'a str,
+    level: &'a str,
+    message: &'a str,
+    line: String,
+    assertion_result: bool,
+    testname: &'a T,
+}
+
+impl<'a, T> AssertJson<'a, T>
+where
+    T: Serialize,
+{
+    fn new(
+        type_: &'a str,
+        level: &'a str,
+        message: &'a str,
+        line: String,
+        assertion_result: bool,
+        testname: &'a T,
+    ) -> Self {
+        Self {
+            type_,
+            level,
+            message,
+            line,
+            assertion_result,
+            testname,
+        }
+    }
+}
+
+pub fn format_assert_json_string<T>(
+    s: &str,
+    terminate_new_line: bool,
+    line: String,
+    assert_result: bool,
+    testname: &T,
+) -> String
+where
+    T: Serialize,
+{
+    let assert_json = AssertJson::new("assert", "WARN", s, line, assert_result, testname);
+
+    let mut out = serde_json::to_string(&assert_json).expect("Failed to serialize assert JSON");
+    if terminate_new_line {
+        out.push('\n');
+    }
+    out
+}
+
+pub fn write_str(s: &str) {
+    let _ = crate::tmk_logger::LOGGER.get_writer().write_str(s);
+}
+
+#[macro_export]
+macro_rules! tmk_assert {
+    ($condition:expr, $message:expr) => {{
+        let file = core::file!();
+        let line = line!();
+        let file_line = format!("{}:{}", file, line);
+        let expn = stringify!($condition);
+        let result: bool = $condition;
+        let js =
+            crate::tmk_assert::format_assert_json_string(expn, true, file_line, result, &$message);
+        crate::tmk_assert::write_str(&js);
+        if !result {
+            panic!("Assertion failed: {}", $message);
+        }
+    }};
+}
diff --git a/opentmk/opentmk/src/tmk_logger.rs b/opentmk/opentmk/src/tmk_logger.rs
new file mode 100644
index 0000000000..4c974103b4
--- /dev/null
+++ b/opentmk/opentmk/src/tmk_logger.rs
@@ -0,0 +1,96 @@
+use core::fmt::Write;
+
+use alloc::{
+    fmt::format,
+    string::{String, ToString},
+};
+use log::SetLoggerError;
+use sync_nostd::{Mutex, MutexGuard};
+
+use crate::arch::serial::{InstrIoAccess, Serial};
+use serde::Serialize;
+
+#[derive(Serialize)]
+struct LogEntry {
+    log_type: &'static str,
+    level: String,
+    message: String,
+    line: String,
+}
+
+impl LogEntry {
+    fn new(level: log::Level, message: &String, line: &String) -> Self {
+        LogEntry {
+            log_type: "log",
+            level: level.as_str().to_string(),
+            message: message.clone(),
+            line: line.clone(),
+        }
+    }
+}
+
+pub fn format_log_string_to_json(
+    message: &String,
+    line: &String,
+    terminate_new_line: bool,
+    level: log::Level,
+) -> String {
+    let log_entry = LogEntry::new(level, message, line);
+    let mut out = serde_json::to_string(&log_entry).unwrap();
+    if terminate_new_line {
+        out.push('\n');
+    }
+    out
+}
+
+pub struct TmkLogger<T> {
+    pub writer: T,
+}
+
+impl<T> TmkLogger<Mutex<T>>
+where
+    T: Write + Send,
+{
+    pub const fn new(provider: T) -> Self {
+        TmkLogger {
+            writer: Mutex::new(provider),
+        }
+    }
+
+    pub fn get_writer(&self) -> MutexGuard<'_, T> {
+        self.writer.lock()
+    }
+}
+
+impl<T> log::Log for TmkLogger<Mutex<T>>
+where
+    T: Write + Send,
+{
+    fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool {
+        true
+    }
+
+    fn log(&self, record: &log::Record<'_>) {
+        let msg = format(*record.args());
+        let line = format!(
+            "{}:{}",
+            record.file().unwrap_or_default(),
+            record.line().unwrap_or_default()
+        );
+        let json = format_log_string_to_json(&msg, &line, true, record.level());
+        let _ = self.writer.lock().write_str(json.as_str());
+    }
+
+    fn flush(&self) {}
+}
+
+pub static LOGGER: TmkLogger<Mutex<Serial<InstrIoAccess>>> =
+    TmkLogger::new(Serial::new(InstrIoAccess {}));
+
+pub fn init() -> Result<(), SetLoggerError> {
+    log::set_logger(&LOGGER).map(|()| log::set_max_level(log::LevelFilter::Debug))
+}
diff --git a/opentmk/opentmk/src/tmkdefs.rs b/opentmk/opentmk/src/tmkdefs.rs
new file mode 100644
index 0000000000..57f9faaa3f
--- /dev/null
+++ b/opentmk/opentmk/src/tmkdefs.rs
@@ -0,0 +1,93 @@
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum TmkErrorType {
+    AllocationFailed,
+    InvalidParameter,
+    EnableVtlFailed,
+    SetDefaultCtxFailed,
+    StartVpFailed,
+    QueueCommandFailed,
+    SetupVtlProtectionFailed,
+    SetupPartitionVtlFailed,
+    SetupInterruptHandlerFailed,
+    SetInterruptIdxFailed,
+    SetupSecureInterceptFailed,
+    ApplyVtlProtectionForMemoryFailed,
+    ReadMsrFailed,
+    WriteMsrFailed,
+    GetRegisterFailed,
+    // Returned by the platform traits on architectures where an operation is
+    // not implemented (referenced from hypvctx.rs).
+    NotImplemented,
+    InvalidHypercallCode,
+    InvalidHypercallInput,
+    InvalidAlignment,
+    AccessDenied,
+    InvalidPartitionState,
+    OperationDenied,
+    UnknownProperty,
+    PropertyValueOutOfRange,
+    InsufficientMemory,
+    PartitionTooDeep,
+    InvalidPartitionId,
+    InvalidVpIndex,
+    NotFound,
+    InvalidPortId,
+    InvalidConnectionId,
+    InsufficientBuffers,
+    NotAcknowledged,
+    InvalidVpState,
+    Acknowledged,
+    InvalidSaveRestoreState,
+    InvalidSynicState,
+    ObjectInUse,
+    InvalidProximityDomainInfo,
+    NoData,
+    Inactive,
+    NoResources,
+    FeatureUnavailable,
+    PartialPacket,
+    ProcessorFeatureNotSupported,
+    ProcessorCacheLineFlushSizeIncompatible,
+    InsufficientBuffer,
+    IncompatibleProcessor,
+    InsufficientDeviceDomains,
+    CpuidFeatureValidationError,
+    CpuidXsaveFeatureValidationError,
+    ProcessorStartupTimeout,
+    SmxEnabled,
+    InvalidLpIndex,
+    InvalidRegisterValue,
+    InvalidVtlState,
+    NxNotDetected,
+    InvalidDeviceId,
+    InvalidDeviceState,
+    PendingPageRequests,
+    PageRequestInvalid,
+    KeyAlreadyExists,
+    DeviceAlreadyInDomain,
+    InvalidCpuGroupId,
+    InvalidCpuGroupState,
+    OperationFailed,
+    NotAllowedWithNestedVirtActive,
+    InsufficientRootMemory,
+    EventBufferAlreadyFreed,
+    Timeout,
+    VtlAlreadyEnabled,
+    UnknownRegisterName,
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct TmkError(pub TmkErrorType);
+
+pub type TmkResult<T> = Result<T, TmkError>;
+
+impl core::error::Error for TmkError {}
+
+impl core::fmt::Display for TmkError {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "TmkError({:?})", self.0)
+    }
+}
+
+impl From<TmkErrorType> for TmkError {
+    fn from(e: TmkErrorType) -> Self {
+        TmkError(e)
+    }
+}
diff --git a/opentmk/opentmk/src/uefi/alloc.rs b/opentmk/opentmk/src/uefi/alloc.rs
new file mode 100644
index 0000000000..f6127573f7
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/alloc.rs
@@ -0,0 +1,101 @@
+use core::ptr::NonNull;
+use core::{alloc::GlobalAlloc, cell::RefCell};
+
+use linked_list_allocator::LockedHeap;
+use sync_nostd::Mutex;
+use uefi::{
+    allocator::Allocator,
+    boot::{self, AllocateType, MemoryType},
+};
+
+pub const SIZE_1MB: usize = 1024 * 1024;
+const PAGE_SIZE: usize = 4096;
+
+#[global_allocator]
+pub static ALLOCATOR: MemoryAllocator = MemoryAllocator {
+    use_locked_heap: Mutex::new(RefCell::new(false)),
+    locked_heap: LockedHeap::empty(),
+    uefi_allocator: Allocator {},
+};
+
+/// Delegates to the UEFI boot-services allocator until `init` switches the
+/// flag over to the linked-list heap (needed once boot services are exited).
+pub struct MemoryAllocator {
+    use_locked_heap: Mutex<RefCell<bool>>,
+    locked_heap: LockedHeap,
+    uefi_allocator: Allocator,
+}
+
+#[expect(unsafe_code)]
+unsafe impl GlobalAlloc for MemoryAllocator {
+    #[allow(unsafe_code)]
+    unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
+        if *self.use_locked_heap.lock().borrow() {
+            unsafe { self.locked_heap.alloc(layout) }
+        } else {
+            unsafe { self.uefi_allocator.alloc(layout) }
+        }
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
+        if *self.use_locked_heap.lock().borrow() {
+            unsafe { self.locked_heap.dealloc(ptr, layout) }
+        } else {
+            unsafe { self.uefi_allocator.dealloc(ptr, layout) }
+        }
+    }
+
+    unsafe fn alloc_zeroed(&self, layout: core::alloc::Layout) -> *mut u8 {
+        if *self.use_locked_heap.lock().borrow() {
+            unsafe { self.locked_heap.alloc_zeroed(layout) }
+        } else {
+            unsafe { self.uefi_allocator.alloc_zeroed(layout) }
+        }
+    }
+    unsafe fn realloc(
+        &self,
+        ptr: *mut u8,
+        layout: core::alloc::Layout,
+        new_size: usize,
+    ) -> *mut u8 {
+        if *self.use_locked_heap.lock().borrow() {
+            unsafe { self.locked_heap.realloc(ptr, layout, new_size) }
+        } else {
+            unsafe { self.uefi_allocator.realloc(ptr, layout, new_size) }
+        }
+    }
+}
+
+impl MemoryAllocator {
+    /// Carve `size` MB out of UEFI boot-services memory and switch the global
+    /// allocator over to the linked-list heap. Returns false on failure.
+    pub fn init(&self, size: usize) -> bool {
+        let pages = ((SIZE_1MB * size) / PAGE_SIZE) + 1;
+        let size = pages * PAGE_SIZE;
+        let mem: Result<NonNull<u8>, uefi::Error> = boot::allocate_pages(
+            AllocateType::AnyPages,
+            MemoryType::BOOT_SERVICES_DATA,
+            pages,
+        );
+        let Ok(mem) = mem else {
+            return false;
+        };
+        let ptr = mem.as_ptr();
+        unsafe {
+            self.locked_heap.lock().init(ptr, size);
+        }
+        *self.use_locked_heap.lock().borrow_mut() = true;
+        true
+    }
+
+    #[allow(dead_code)]
+    pub fn get_page_aligned_memory(&self, size: usize) -> *mut u8 {
+        let pages = ((SIZE_1MB * size) / PAGE_SIZE) + 1;
+        let mem: Result<NonNull<u8>, uefi::Error> = boot::allocate_pages(
+            AllocateType::AnyPages,
+            MemoryType::BOOT_SERVICES_DATA,
+            pages,
+        );
+        match mem {
+            Ok(mem) => mem.as_ptr(),
+            Err(_) => core::ptr::null_mut(),
+        }
+    }
+}
diff --git a/opentmk/opentmk/src/uefi/hypvctx.rs b/opentmk/opentmk/src/uefi/hypvctx.rs
new file mode 100644
index 0000000000..10139bb25d
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/hypvctx.rs
@@ -0,0 +1,548 @@
+use crate::uefi::alloc::ALLOCATOR;
+use crate::{
+    context::{
+        InterruptPlatformTrait, MsrPlatformTrait, SecureInterceptPlatformTrait,
+        VirtualProcessorPlatformTrait, VpExecutor, VtlPlatformTrait,
+    },
+    hypercall::HvCall,
+    tmkdefs::{TmkError, TmkErrorType, TmkResult},
+};
+
+use alloc::boxed::Box;
+use alloc::collections::linked_list::LinkedList;
+use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet};
+use core::alloc::{GlobalAlloc, Layout};
+use core::arch::asm;
+use core::ops::Range;
+use hvdef::hypercall::{HvInputVtl, InitialVpContextX64};
+use hvdef::Vtl;
+use memory_range::MemoryRange;
+use minimal_rt::arch::msr::{read_msr, write_msr};
+use sync_nostd::Mutex;
+
+const ALIGNMENT: usize = 4096;
+
+/// A VP command is a one-shot closure run by that VP's executor loop.
+type VpCommand = Box<dyn FnOnce(&mut HvTestCtx) + Send>;
+type CommandTable = BTreeMap<u32, LinkedList<(VpCommand, Vtl)>>;
+
+// A plain static works here: `Mutex` is `Sync`, so no `static mut` (and no
+// unsafe access) is needed.
+static CMD: Mutex<CommandTable> = Mutex::new(BTreeMap::new());
+
+fn cmdt() -> &'static Mutex<CommandTable> {
+    &CMD
+}
+
+fn register_command_queue(vp_index: u32) {
+    log::debug!("registering command queue for vp: {}", vp_index);
+    if cmdt().lock().get(&vp_index).is_none() {
+        cmdt().lock().insert(vp_index, LinkedList::new());
+        log::debug!("registered command queue for vp: {}", vp_index);
+    } else {
+        log::debug!("command queue already registered for vp: {}", vp_index);
+    }
+}
+
+pub struct HvTestCtx {
+    pub hvcall: HvCall,
+    pub vp_running: BTreeSet<u32>,
+    pub my_vp_idx: u32,
+    pub my_vtl: Vtl,
+}
+
+impl SecureInterceptPlatformTrait for HvTestCtx {
+    /// Configure the Synthetic Interrupt Message Page (SIMP) and the first
+    /// SynIC interrupt (SINT0) so that the hypervisor can vector its
+    /// notifications back to the guest.
+    /// Fails with `AllocationFailed` if the SIMP buffer cannot be allocated.
+    fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()> {
+        let layout = Layout::from_size_align(4096, ALIGNMENT)
+            .map_err(|_| TmkError(TmkErrorType::AllocationFailed))?;
+
+        let ptr = unsafe { ALLOCATOR.alloc(layout) };
+        // SIMP register layout: bits 63:12 hold the GPN of the message page,
+        // bit 0 is the enable bit.
+        let gpn = (ptr as u64) >> 12;
+        let reg = (gpn << 12) | 0x1;
+
+        unsafe { write_msr(hvdef::HV_X64_MSR_SIMP, reg) };
+        log::info!("Successfully set the SIMP register.");
+
+        let reg = unsafe { read_msr(hvdef::HV_X64_MSR_SINT0) };
+        let mut reg: hvdef::HvSynicSint = reg.into();
+        reg.set_vector(interrupt_idx);
+        reg.set_masked(false);
+        reg.set_auto_eoi(true);
+
+        self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into())?;
+        log::info!("Successfully set the SINT0 register.");
+        Ok(())
+    }
+}
+
+impl InterruptPlatformTrait for HvTestCtx {
+    /// Install an interrupt handler for the supplied vector on x86-64.
+    /// For non-x86-64 targets the call returns
+    /// [`TmkErrorType::NotImplemented`].
+    fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()> {
+        #[cfg(target_arch = "x86_64")]
+        {
+            crate::arch::interrupt::set_handler(interrupt_idx, handler);
+            Ok(())
+        }
+
+        #[cfg(not(target_arch = "x86_64"))]
+        {
+            Err(TmkError(TmkErrorType::NotImplemented))
+        }
+    }
+
+    /// Initialise the minimal in-guest interrupt infrastructure
+    /// (IDT/GIC, etc. depending on architecture).
+    fn setup_interrupt_handler(&mut self) -> TmkResult<()> {
+        crate::arch::interrupt::init();
+        Ok(())
+    }
+}
+
+impl MsrPlatformTrait for HvTestCtx {
+    /// Read an MSR directly from the CPU and return the raw value.
+    fn read_msr(&mut self, msr: u32) -> TmkResult<u64> {
+        Ok(unsafe { read_msr(msr) })
+    }
+
+    /// Write an MSR directly on the CPU.
+    fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()> {
+        unsafe { write_msr(msr, value) };
+        Ok(())
+    }
+}
+
+impl VirtualProcessorPlatformTrait for HvTestCtx {
+    /// Fetch the content of the specified architectural register from
+    /// the current VTL for the executing VP.
+    fn get_register(&mut self, reg: u32) -> TmkResult<u128> {
+        #[cfg(target_arch = "x86_64")]
+        {
+            use hvdef::HvX64RegisterName;
+            let reg = HvX64RegisterName(reg);
+            let val = self.hvcall.get_register(reg.into(), None)?.as_u128();
+            Ok(val)
+        }
+
+        #[cfg(target_arch = "aarch64")]
+        {
+            use hvdef::HvAarch64RegisterName;
+            let reg = HvAarch64RegisterName(reg);
+            let val = self.hvcall.get_register(reg.into(), None)?.as_u128();
+            Ok(val)
+        }
+
+        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
+        {
+            Err(TmkError(TmkErrorType::NotImplemented))
+        }
+    }
+
+    /// Return the number of logical processors reported by `cpuid` leaf 1
+    /// (EBX bits 23:16) on x86-64.
+    fn get_vp_count(&self) -> TmkResult<u32> {
+        #[cfg(target_arch = "x86_64")]
+        {
+            let mut result: u32;
+            unsafe {
+                asm!(
+                    "push rbx",
+                    "cpuid",
+                    "mov {result:r}, rbx",
+                    "pop rbx",
+                    // cpuid clobbers eax, so declare it in/out and discard.
+                    inout("eax") 1u32 => _,
+                    out("ecx") _,
+                    out("edx") _,
+                    result = out(reg) result,
+                    options(nomem, nostack)
+                );
+            }
+            Ok((result >> 16) & 0xFF)
+        }
+
+        #[cfg(not(target_arch = "x86_64"))]
+        {
+            Err(TmkError(TmkErrorType::NotImplemented))
+        }
+    }
+    /// Push a command onto the per-VP linked list so it will be executed
+    /// by the busy-loop running in `exec_handler`. No scheduling happens
+    /// here – we simply enqueue.
+    fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> {
+        let (vp_index, vtl, cmd) = cmd.get();
+        let cmd = cmd.ok_or(TmkError(TmkErrorType::QueueCommandFailed))?;
+        cmdt()
+            .lock()
+            .get_mut(&vp_index)
+            .unwrap()
+            .push_back((cmd, vtl));
+        Ok(())
+    }
+
+    /// Ensure the target VP is running in the requested VTL and queue
+    /// the command for execution.
+    /// – If the VP is not yet running, it is started with a default
+    ///   context.
+    /// – If the command targets a different VTL than the current one,
+    ///   control is switched via `vtl_call` / `vtl_return` so that the
+    ///   executor loop can pick the command up.
+    /// In short, every VP acts as an executor engine and spins in
+    /// `exec_handler` waiting for work.
+    fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> {
+        let (vp_index, vtl, cmd) = cmd.get();
+        let cmd = cmd.ok_or(TmkError(TmkErrorType::InvalidParameter))?;
+        if vtl >= Vtl::Vtl2 {
+            return Err(TmkError(TmkErrorType::InvalidParameter));
+        }
+
+        if self.vp_running.contains(&vp_index) {
+            log::debug!("both vtl0 and vtl1 are running for VP: {:?}", vp_index);
+        } else if vp_index == 0 {
+            let vp_context = self.get_default_context()?;
+            self.hvcall.enable_vp_vtl(0, Vtl::Vtl1, Some(vp_context))?;
+
+            cmdt().lock().get_mut(&vp_index).unwrap().push_back((
+                Box::new(move |ctx| {
+                    ctx.switch_to_low_vtl();
+                }),
+                Vtl::Vtl1,
+            ));
+            self.switch_to_high_vtl();
+            self.vp_running.insert(vp_index);
+        } else {
+            let (tx, rx) = sync_nostd::Channel::<TmkResult<()>>::new().split();
+            cmdt().lock().get_mut(&self.my_vp_idx).unwrap().push_back((
+                Box::new(move |ctx| {
+                    let r = ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1);
+                    if r.is_err() {
+                        let _ = tx.send(r);
+                        return;
+                    }
+                    let r = ctx.start_running_vp_with_default_context(VpExecutor::new(
+                        vp_index,
+                        Vtl::Vtl0,
+                    ));
+                    if r.is_err() {
+                        let _ = tx.send(r);
+                        return;
+                    }
+                    let _ = tx.send(Ok(()));
+                    ctx.switch_to_low_vtl();
+                }),
+                Vtl::Vtl1,
+            ));
+            self.switch_to_high_vtl();
+            log::debug!(
+                "VP{} waiting for start confirmation for vp from VTL1: {}",
+                self.my_vp_idx,
+                vp_index
+            );
+            if let Ok(r) = rx.recv() {
+                r?;
+            }
+            self.vp_running.insert(vp_index);
+        }
+
+        cmdt()
+            .lock()
+            .get_mut(&vp_index)
+            .unwrap()
+            .push_back((cmd, vtl));
+
+        if vp_index == self.my_vp_idx && self.my_vtl != vtl {
+            if vtl == Vtl::Vtl0 {
+                self.switch_to_low_vtl();
+            } else {
+                self.switch_to_high_vtl();
+            }
+        }
+        Ok(())
+    }
+
+    /// Start the given VP in the current VTL using a freshly captured
+    /// context and *do not* queue any additional work.
+    fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) -> TmkResult<()> {
+        let (vp_index, vtl, _cmd) = cmd.get();
+        let vp_ctx = self.get_default_context()?;
+        self.hvcall
+            .start_virtual_processor(vp_index, vtl, Some(vp_ctx))?;
+        Ok(())
+    }
+
+    /// Return the index of the VP that is currently executing this code.
+    fn get_current_vp(&self) -> TmkResult<u32> {
+        Ok(self.my_vp_idx)
+    }
+}
+
+impl VtlPlatformTrait for HvTestCtx {
+    /// Apply VTL protections to the supplied GPA range so that only the
+    /// provided VTL can access it.
+    fn apply_vtl_protection_for_memory(&mut self, range: Range<u64>, vtl: Vtl) -> TmkResult<()> {
+        self.hvcall
+            .apply_vtl_protections(MemoryRange::new(range), vtl)?;
+        Ok(())
+    }
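+    // Illustrative only (editor's sketch): protect a freshly allocated buffer
+    // so that VTL0 can no longer read it (`ptr` and `size` are hypothetical
+    // and would come from the global allocator):
+    //
+    //     let range = ptr as u64..ptr as u64 + size as u64;
+    //     ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1)?;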
+    /// Enable the specified VTL on a VP and seed it with a default
+    /// context captured from the current execution environment.
+    fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> {
+        let vp_ctx = self.get_default_context()?;
+        self.hvcall.enable_vp_vtl(vp_index, vtl, Some(vp_ctx))?;
+        Ok(())
+    }
+
+    /// Return the VTL in which the current code is running.
+    fn get_current_vtl(&self) -> TmkResult<Vtl> {
+        Ok(self.my_vtl)
+    }
+
+    /// Inject a default context into an already existing VP/VTL pair.
+    fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> {
+        let i: u8 = match vtl {
+            Vtl::Vtl0 => 0,
+            Vtl::Vtl1 => 1,
+            Vtl::Vtl2 => 2,
+        };
+        let vp_context = self.get_default_context()?;
+        self.hvcall.set_vp_registers(
+            vp_index,
+            Some(
+                HvInputVtl::new()
+                    .with_target_vtl_value(i)
+                    .with_use_target_vtl(true),
+            ),
+            Some(vp_context),
+        )?;
+        Ok(())
+    }
+
+    /// Enable VTL support for the entire partition.
+    fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()> {
+        self.hvcall
+            .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl)?;
+        log::info!("enabled vtl {:?} for the partition.", vtl);
+        Ok(())
+    }
+
+    /// Turn on VTL protections for the currently running VTL.
+    fn setup_vtl_protection(&mut self) -> TmkResult<()> {
+        self.hvcall.enable_vtl_protection(HvInputVtl::CURRENT_VTL)?;
+        log::info!("enabled vtl protections for the partition.");
+        Ok(())
+    }
+
+    /// Switch execution from the current (low) VTL to the next higher
+    /// one (`vtl_call`).
+    fn switch_to_high_vtl(&mut self) {
+        HvCall::vtl_call();
+    }
+
+    /// Return from a high VTL back to the low VTL (`vtl_return`).
+    fn switch_to_low_vtl(&mut self) {
+        HvCall::vtl_return();
+    }
+}
+
+impl HvTestCtx {
+    /// Construct an *un-initialised* test context.
+    /// Call [`HvTestCtx::init`] before using the value.
+    pub const fn new() -> Self {
+        HvTestCtx {
+            hvcall: HvCall::new(),
+            vp_running: BTreeSet::new(),
+            my_vp_idx: 0,
+            my_vtl: Vtl::Vtl0,
+        }
+    }
+
+    /// Perform the one-time initialisation sequence:
+    /// – initialise the hypercall page,
+    /// – discover the VP count and create command queues,
+    /// – record the current VTL.
+    pub fn init(&mut self) -> TmkResult<()> {
+        self.hvcall.initialize();
+        let vp_count = self.get_vp_count()?;
+        for i in 0..vp_count {
+            register_command_queue(i);
+        }
+        self.my_vtl = self.hvcall.vtl();
+        Ok(())
+    }
+
+    /// Busy-loop executor that runs on every VP.
+    /// Extracts commands from the per-VP queue and executes them in the
+    /// appropriate VTL, switching VTLs when necessary.
+    fn exec_handler() {
+        let mut ctx = HvTestCtx::new();
+        ctx.init().expect("error: failed to init on a VP");
+        let reg = ctx
+            .hvcall
+            .get_register(hvdef::HvAllArchRegisterName::VpIndex.into(), None)
+            .expect("error: failed to get vp index");
+        ctx.my_vp_idx = reg.as_u64() as u32;
+
+        loop {
+            let mut vtl: Option<Vtl> = None;
+            let mut cmd: Option<VpCommand> = None;
+
+            {
+                let mut cmdt = cmdt().lock();
+                if let Some(queue) = cmdt.get_mut(&ctx.my_vp_idx) {
+                    if let Some((_c, v)) = queue.front() {
+                        if *v == ctx.my_vtl {
+                            let (c, _v) = queue.pop_front().unwrap();
+                            cmd = Some(c);
+                        } else {
+                            vtl = Some(*v);
+                        }
+                    }
+                }
+            }
+
+            if let Some(vtl) = vtl {
+                if vtl == Vtl::Vtl0 {
+                    ctx.switch_to_low_vtl();
+                } else {
+                    ctx.switch_to_high_vtl();
+                }
+            }
+
+            if let Some(cmd) = cmd {
+                cmd(&mut ctx);
+            }
+        }
+    }
+    #[cfg(target_arch = "x86_64")]
+    /// Capture the current VP context, patch the entry point and stack
+    /// so that the new VP starts in `exec_handler`.
+    fn get_default_context(&mut self) -> Result<InitialVpContextX64, TmkError> {
+        self.run_fn_with_current_context(HvTestCtx::exec_handler)
+    }
+
+    #[cfg(target_arch = "x86_64")]
+    /// Helper to wrap an arbitrary function inside a captured VP context
+    /// that can later be used to start a new VP/VTL instance.
+    fn run_fn_with_current_context(&mut self, func: fn()) -> Result<InitialVpContextX64, TmkError> {
+        use super::alloc::SIZE_1MB;
+
+        let mut vp_context: InitialVpContextX64 = self
+            .hvcall
+            .get_current_vtl_vp_context()
+            .expect("Failed to get VTL1 context");
+        let stack_layout = Layout::from_size_align(SIZE_1MB, 16)
+            .expect("Failed to create layout for stack allocation");
+        let allocated_stack_ptr = unsafe { ALLOCATOR.alloc(stack_layout) };
+        if allocated_stack_ptr.is_null() {
+            return Err(TmkErrorType::AllocationFailed.into());
+        }
+        let stack_size = stack_layout.size();
+        // The stack grows down, so start at the top of the allocation.
+        let stack_top = allocated_stack_ptr as u64 + stack_size as u64;
+        vp_context.rip = func as usize as u64;
+        vp_context.rsp = stack_top;
+        Ok(vp_context)
+    }
+}
+
+impl From<hvdef::HvError> for TmkError {
+    fn from(e: hvdef::HvError) -> Self {
+        log::debug!("Converting hvdef::HvError::{:?} to TmkError", e);
+        let tmk_error_type = match e {
+            hvdef::HvError::InvalidHypercallCode => TmkErrorType::InvalidHypercallCode,
+            hvdef::HvError::InvalidHypercallInput => TmkErrorType::InvalidHypercallInput,
+            hvdef::HvError::InvalidAlignment => TmkErrorType::InvalidAlignment,
+            hvdef::HvError::InvalidParameter => TmkErrorType::InvalidParameter,
+            hvdef::HvError::AccessDenied => TmkErrorType::AccessDenied,
+            hvdef::HvError::InvalidPartitionState => TmkErrorType::InvalidPartitionState,
+            hvdef::HvError::OperationDenied => TmkErrorType::OperationDenied,
+            hvdef::HvError::UnknownProperty => TmkErrorType::UnknownProperty,
+            hvdef::HvError::PropertyValueOutOfRange => TmkErrorType::PropertyValueOutOfRange,
+            hvdef::HvError::InsufficientMemory => TmkErrorType::InsufficientMemory,
+            hvdef::HvError::PartitionTooDeep => TmkErrorType::PartitionTooDeep,
+            hvdef::HvError::InvalidPartitionId => TmkErrorType::InvalidPartitionId,
+            hvdef::HvError::InvalidVpIndex => TmkErrorType::InvalidVpIndex,
+            hvdef::HvError::NotFound => TmkErrorType::NotFound,
+            hvdef::HvError::InvalidPortId => TmkErrorType::InvalidPortId,
+            hvdef::HvError::InvalidConnectionId => TmkErrorType::InvalidConnectionId,
+            hvdef::HvError::InsufficientBuffers => TmkErrorType::InsufficientBuffers,
+            hvdef::HvError::NotAcknowledged => TmkErrorType::NotAcknowledged,
+            hvdef::HvError::InvalidVpState => TmkErrorType::InvalidVpState,
+            hvdef::HvError::Acknowledged => TmkErrorType::Acknowledged,
+            hvdef::HvError::InvalidSaveRestoreState => TmkErrorType::InvalidSaveRestoreState,
+            hvdef::HvError::InvalidSynicState => TmkErrorType::InvalidSynicState,
+            hvdef::HvError::ObjectInUse => TmkErrorType::ObjectInUse,
+            hvdef::HvError::InvalidProximityDomainInfo => TmkErrorType::InvalidProximityDomainInfo,
+            hvdef::HvError::NoData => TmkErrorType::NoData,
+            hvdef::HvError::Inactive => TmkErrorType::Inactive,
+            hvdef::HvError::NoResources => TmkErrorType::NoResources,
+            hvdef::HvError::FeatureUnavailable => TmkErrorType::FeatureUnavailable,
+            hvdef::HvError::PartialPacket => TmkErrorType::PartialPacket,
+            hvdef::HvError::ProcessorFeatureNotSupported => {
+                TmkErrorType::ProcessorFeatureNotSupported
+            }
+            hvdef::HvError::ProcessorCacheLineFlushSizeIncompatible => {
+                TmkErrorType::ProcessorCacheLineFlushSizeIncompatible
+            }
+            hvdef::HvError::InsufficientBuffer => TmkErrorType::InsufficientBuffer,
+            hvdef::HvError::IncompatibleProcessor => TmkErrorType::IncompatibleProcessor,
+            hvdef::HvError::InsufficientDeviceDomains => TmkErrorType::InsufficientDeviceDomains,
+            hvdef::HvError::CpuidFeatureValidationError => {
+                TmkErrorType::CpuidFeatureValidationError
+            }
+            hvdef::HvError::CpuidXsaveFeatureValidationError => {
+                TmkErrorType::CpuidXsaveFeatureValidationError
+            }
+            hvdef::HvError::ProcessorStartupTimeout => TmkErrorType::ProcessorStartupTimeout,
+            hvdef::HvError::SmxEnabled => TmkErrorType::SmxEnabled,
+            hvdef::HvError::InvalidLpIndex => TmkErrorType::InvalidLpIndex,
+            hvdef::HvError::InvalidRegisterValue => TmkErrorType::InvalidRegisterValue,
+            hvdef::HvError::InvalidVtlState => TmkErrorType::InvalidVtlState,
+            hvdef::HvError::NxNotDetected => TmkErrorType::NxNotDetected,
+            hvdef::HvError::InvalidDeviceId => TmkErrorType::InvalidDeviceId,
+            hvdef::HvError::InvalidDeviceState => TmkErrorType::InvalidDeviceState,
+            hvdef::HvError::PendingPageRequests => TmkErrorType::PendingPageRequests,
+            hvdef::HvError::PageRequestInvalid => TmkErrorType::PageRequestInvalid,
+            hvdef::HvError::KeyAlreadyExists => TmkErrorType::KeyAlreadyExists,
+            hvdef::HvError::DeviceAlreadyInDomain => TmkErrorType::DeviceAlreadyInDomain,
+            hvdef::HvError::InvalidCpuGroupId => TmkErrorType::InvalidCpuGroupId,
+            hvdef::HvError::InvalidCpuGroupState => TmkErrorType::InvalidCpuGroupState,
+            hvdef::HvError::OperationFailed => TmkErrorType::OperationFailed,
+            hvdef::HvError::NotAllowedWithNestedVirtActive => {
+                TmkErrorType::NotAllowedWithNestedVirtActive
+            }
+            hvdef::HvError::InsufficientRootMemory => TmkErrorType::InsufficientRootMemory,
+            hvdef::HvError::EventBufferAlreadyFreed => TmkErrorType::EventBufferAlreadyFreed,
+            hvdef::HvError::Timeout => TmkErrorType::Timeout,
+            hvdef::HvError::VtlAlreadyEnabled => TmkErrorType::VtlAlreadyEnabled,
+            hvdef::HvError::UnknownRegisterName => TmkErrorType::UnknownRegisterName,
+            // Add any other specific mappings here if hvdef::HvError has more variants
+            _ => {
+                // Keep the message on one line so it stays a single JSON record.
+                log::warn!("Unhandled hvdef::HvError variant: {:?}; mapping to TmkErrorType::OperationFailed.", e);
+                TmkErrorType::OperationFailed // Generic fallback
+            }
+        };
+        log::debug!(
+            "Mapped hvdef::HvError::{:?} to TmkErrorType::{:?}",
+            e,
+            tmk_error_type
+        );
+        TmkError(tmk_error_type)
+    }
+}
diff --git a/opentmk/opentmk/src/uefi/init.rs b/opentmk/opentmk/src/uefi/init.rs
new file mode 100644
index 0000000000..aa2eac3402
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/init.rs
@@ -0,0 +1,56 @@
+use uefi::{boot::{exit_boot_services, MemoryType}, guid, CStr16, Status};
+
+use super::alloc::ALLOCATOR;
+
+const EFI_GUID: uefi::Guid = guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91");
+const OS_LOADER_INDICATIONS: &str = "OsLoaderIndications";
+
+/// Set bit 0 of the `OsLoaderIndications` UEFI variable, then exit boot
+/// services so the firmware hands over with the requested VTL protection.
+fn enable_uefi_vtl_protection() {
+    let mut buf = vec![0u8; 1024];
+    let mut str_buff = vec![0u16; 1024];
+    let os_loader_indications_key =
+        CStr16::from_str_with_buf(OS_LOADER_INDICATIONS, str_buff.as_mut_slice()).unwrap();
+
+    let os_loader_indications_result = uefi::runtime::get_variable(
+        os_loader_indications_key,
+        &uefi::runtime::VariableVendor(EFI_GUID),
+        buf.as_mut(),
+    )
+    .expect("Failed to get OsLoaderIndications");
+
+    let mut os_loader_indications = u32::from_le_bytes(
+        os_loader_indications_result.0[0..4]
+            .try_into()
+            .expect("error in output"),
+    );
+    os_loader_indications |= 0x1u32;
+
+    let os_loader_indications = os_loader_indications.to_le_bytes();
+
+    uefi::runtime::set_variable(
+        os_loader_indications_key,
+        &uefi::runtime::VariableVendor(EFI_GUID),
+        os_loader_indications_result.1,
+        &os_loader_indications,
+    )
+    .expect("Failed to set OsLoaderIndications");
+
+    let _os_loader_indications_result = uefi::runtime::get_variable(
+        os_loader_indications_key,
+        &uefi::runtime::VariableVendor(EFI_GUID),
+        buf.as_mut(),
+    )
+    .expect("Failed to get OsLoaderIndications");
+
+    let _memory_map = unsafe { exit_boot_services(MemoryType::BOOT_SERVICES_DATA) };
+}
+
+pub fn init() -> Result<(), Status> {
+    if !ALLOCATOR.init(2048) {
+        return Err(Status::ABORTED);
+    }
+    crate::tmk_logger::init().expect("Failed to init logger");
+    enable_uefi_vtl_protection();
+    Ok(())
+}
diff --git a/opentmk/opentmk/src/uefi/mod.rs b/opentmk/opentmk/src/uefi/mod.rs
new file mode 100644
index 0000000000..a1e677259d
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/mod.rs
@@ -0,0 +1,24 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+mod alloc;
+mod hypvctx;
+pub mod init;
+mod rt;
+mod tests;
+
+use crate::tmk_assert;
+use init::init;
+use uefi::entry;
+use uefi::Status;
+
+#[entry]
+fn uefi_main() -> Status {
+    let r = init();
+    tmk_assert!(r.is_ok(), "init should succeed");
+
+    log::warn!("TEST_START");
+    tests::run_test();
+    log::warn!("TEST_END");
+    Status::SUCCESS
+}
diff --git a/opentmk/opentmk/src/uefi/rt.rs b/opentmk/opentmk/src/uefi/rt.rs
new file mode 100644
index 0000000000..46cfc431d4
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/rt.rs
@@ -0,0 +1,15 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+//! Runtime support for the UEFI application environment.
+
+#![cfg(target_os = "uefi")]
+
+#[panic_handler]
+fn panic_handler(panic: &core::panic::PanicInfo<'_>) -> ! {
+    log::error!("Panic at runtime: {}", panic);
+    log::warn!("TEST_END");
+    loop {}
+}
diff --git a/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs b/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs
new file mode 100644
index 0000000000..45aa4b2248
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/tests/hv_error_vp_start.rs
@@ -0,0 +1,47 @@
+use hvdef::Vtl;
+use sync_nostd::Channel;
+
+use crate::{context::{VirtualProcessorPlatformTrait, VpExecutor, VtlPlatformTrait}, tmk_assert};
+
+pub fn exec<T>(ctx: &mut T)
+where
+    T: VtlPlatformTrait + VirtualProcessorPlatformTrait,
+{
+    // Skipping VTL setup on purpose so that starting a VP in VTL1 exercises
+    // the negative path.
+
+    let vp_count = ctx.get_vp_count();
+    tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed");
+
+    let vp_count = vp_count.unwrap();
+    tmk_assert!(vp_count == 8, "vp count should be 8");
+
+    // Testing BSP VTL1 bringup
+    {
+        let (tx, _rx) = Channel::new().split();
+
+        let result = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| {
+            let vp = ctx.get_current_vp();
+            tmk_assert!(vp.is_ok(), "vp should be valid");
+
+            let vp = vp.unwrap();
+            log::info!("vp: {}", vp);
+            tmk_assert!(vp == 0, "vp should be equal to 0");
+
+            let vtl = ctx.get_current_vtl();
+            tmk_assert!(vtl.is_ok(), "vtl should be valid");
+
+            let vtl = vtl.unwrap();
+            log::info!("vtl: {:?}", vtl);
+            tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP");
+            tx.send(())
+                .expect("Failed to send message through the channel");
+            ctx.switch_to_low_vtl();
+        }));
+
+        tmk_assert!(result.is_err(), "start_on_vp should fail");
+        tmk_assert!(
+            result.unwrap_err() == crate::tmkdefs::TmkErrorType::InvalidVtlState.into(),
+            "start_on_vp should fail with InvalidVtlState"
+        );
+        log::info!("result on start_on_vp: {:?}", result);
+    }
+}
diff --git a/opentmk/opentmk/src/uefi/tests/hv_misc.rs b/opentmk/opentmk/src/uefi/tests/hv_misc.rs
new file mode 100644
index 0000000000..a912a94595
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/tests/hv_misc.rs
@@ -0,0 +1,137 @@
+// WIP: This test is not yet complete and is not expected to pass.
+//
+// This test is to verify that the VTL protections are working as expected.
+// The stack values in VTL0 are changing after interrupt handling in VTL1.
+#![allow(warnings)]
+use crate::context::{
+    InterruptPlatformTrait, SecureInterceptPlatformTrait, VirtualProcessorPlatformTrait,
+    VtlPlatformTrait,
+};
+use crate::tmk_assert;
+use crate::tmkdefs::TmkResult;
+use crate::uefi::alloc::{ALLOCATOR, SIZE_1MB};
+use crate::{context, uefi::hypvctx};
+use ::alloc::boxed::Box;
+use ::alloc::vec::Vec;
+use alloc::sync::Arc;
+use context::VpExecutor;
+use core::alloc::{GlobalAlloc, Layout};
+use core::arch::asm;
+use core::cell::RefCell;
+use core::ops::Range;
+use core::sync::atomic::{AtomicI32, Ordering};
+use hvdef::hypercall::HvInputVtl;
+use hvdef::{HvAllArchRegisterName, HvRegisterVsmVpStatus, HvX64RegisterName, Vtl};
+use hypvctx::HvTestCtx;
+use sync_nostd::{Channel, Receiver, Sender};
+use uefi::entry;
+use uefi::Status;
+
+static mut HEAPX: RefCell<*mut u8> = RefCell::new(core::ptr::null_mut());
+static mut CON: AtomicI32 = AtomicI32::new(0);
+
+pub fn exec<T>(ctx: &mut T)
+where
+    T: InterruptPlatformTrait
+        + SecureInterceptPlatformTrait
+        + VtlPlatformTrait
+        + VirtualProcessorPlatformTrait,
+{
+    log::info!("ctx ptr: {:p}", &ctx as *const _);
+
+    let vp_count = ctx.get_vp_count();
+    tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed");
+    let vp_count = vp_count.unwrap();
+    tmk_assert!(vp_count == 8, "vp count should be 8");
+
+    ctx.setup_interrupt_handler();
+
+    log::info!("set intercept handler successfully!");
+
+    ctx.setup_partition_vtl(Vtl::Vtl1);
+
+    ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| {
+        log::info!("successfully started running VTL1 on vp0.");
+        ctx.setup_secure_intercept(0x30);
+        ctx.set_interrupt_idx(0x30, move || {
+            log::info!("interrupt fired!");
+            log::info!("interrupt handled!");
+        });
+
+        let layout = Layout::from_size_align(SIZE_1MB, 4096).expect("failed to create layout");
+        let ptr = unsafe { ALLOCATOR.alloc(layout) };
+        log::info!("allocated some memory in the heap from vtl1");
+        unsafe {
+            let mut z = HEAPX.borrow_mut();
+            *z = ptr;
+            *ptr.add(10) = 0xAA;
+        }
+
+        let size = layout.size();
+        ctx.setup_vtl_protection();
+
+        log::info!("enabled vtl protections for the partition.");
+
+        let range = Range {
+            start: ptr as u64,
+            end: ptr as u64 + size as u64,
+        };
+
+        ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1);
+
+        log::info!("moving to vtl0 to attempt to read the heap memory");
+
+        ctx.switch_to_low_vtl();
+    }));
+
+    ctx.queue_command_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| {
+        log::info!("successfully started running VTL1 on vp0.");
+        ctx.switch_to_low_vtl();
+    }));
+    log::info!("ctx ptr: {:p}", &ctx as *const _);
+
+    let mut l = 0u64;
+    unsafe { asm!("mov {}, rsp", out(reg) l) };
+    log::info!("rsp: 0x{:x}", l);
+    unsafe {
+        log::info!("Attempting to read heap memory from vtl0");
+        let heapx = *HEAPX.borrow();
+        let val = *(heapx.add(10));
+        log::info!(
+            "reading mutated heap memory from vtl0 (it should not be 0xAA): 0x{:x}",
+            val
+        );
+        tmk_assert!(val != 0xAA, "heap memory should not be accessible from vtl0");
+    }
+
+    log::info!("after ctx ptr: {:p}", &ctx as *const _);
+    unsafe { asm!("mov {}, rsp", out(reg) l) };
+    log::info!("rsp: 0x{:x}", l);
+
+    // let (mut tx, mut rx) = Channel::new(1);
+    // {
+    //     let mut tx = tx.clone();
+    //     ctx.start_on_vp(VpExecutor::new(2, Vtl::Vtl0).command(
+    //         move |ctx: &mut dyn TestCtxTrait| {
+    //             log::info!("Hello from vtl0 on vp2!");
+    //             tx.send(());
+    //         },
+    //     ));
+    // }
+    log::info!("ctx ptr: {:p}", &ctx as *const _);
+    let c = ctx.get_vp_count();
+    tmk_assert!(c.is_ok(), "get_vp_count should succeed");
+    let c = c.unwrap();
+    tmk_assert!(c == 8, "vp count should be 8");
+
+    // rx.recv();
+
+    log::info!("we are in vtl0 now!");
+    log::info!("we reached the end of the test");
+    loop {}
+}
diff --git a/opentmk/opentmk/src/uefi/tests/hv_processor.rs b/opentmk/opentmk/src/uefi/tests/hv_processor.rs
new file mode 100644
index 0000000000..a7e12f89a5
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/tests/hv_processor.rs
@@ -0,0 +1,102 @@
+use hvdef::Vtl;
+use sync_nostd::Channel;
+
+use crate::{
+    context::{VirtualProcessorPlatformTrait, VpExecutor, VtlPlatformTrait},
+    tmk_assert,
+};
+
+pub fn exec<T>(ctx: &mut T)
+where
+    T: VtlPlatformTrait + VirtualProcessorPlatformTrait,
+{
+    let r = ctx.setup_partition_vtl(Vtl::Vtl1);
+    tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed");
+
+    let vp_count = ctx.get_vp_count();
+    tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed");
+
+    let vp_count = vp_count.unwrap();
+    tmk_assert!(vp_count == 8, "vp count should be 8");
+
+    // Testing BSP VTL bringup
+    {
+        let (tx, rx) = Channel::new().split();
+        let result = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| {
+            let vp = ctx.get_current_vp();
+            tmk_assert!(vp.is_ok(), "vp should be valid");
+
+            let vp = vp.unwrap();
+            log::info!("vp: {}", vp);
+            tmk_assert!(vp == 0, "vp should be equal to 0");
+
+            let vtl = ctx.get_current_vtl();
+            tmk_assert!(vtl.is_ok(), "vtl should be valid");
+
+            let vtl = vtl.unwrap();
+            log::info!("vtl: {:?}", vtl);
+            tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP");
+            tx.send(())
+                .expect("Failed to send message through the channel");
+            ctx.switch_to_low_vtl();
+        }));
+        tmk_assert!(result.is_ok(), "start_on_vp should succeed");
+        _ = rx.recv();
+    }
+
+    for i in 1..vp_count {
+        // Testing VTL1
+        {
+            let (tx, rx) = Channel::new().split();
+            let result =
+                ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command(move |ctx: &mut T| {
+                    let vp = ctx.get_current_vp();
+                    tmk_assert!(vp.is_ok(), "vp should be valid");
+
+                    let vp = vp.unwrap();
+                    log::info!("vp: {}", vp);
+                    tmk_assert!(vp == i, format!("vp should be equal to {}", i));
+
+                    let vtl = ctx.get_current_vtl();
+                    tmk_assert!(vtl.is_ok(), "vtl should be valid");
+
+                    let vtl = vtl.unwrap();
+                    log::info!("vtl: {:?}", vtl);
+                    tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl1 for VP {}", i));
+                    _ = tx.send(());
+                }));
+            tmk_assert!(result.is_ok(), "start_on_vp should succeed");
+            _ = rx.recv();
+        }
+
+        // Testing VTL0
+        {
+            let (tx, rx) = Channel::new().split();
+            let result =
+                ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command(move |ctx: &mut T| {
+                    let vp = ctx.get_current_vp();
+                    tmk_assert!(vp.is_ok(), "vp should be valid");
+
+                    let vp = vp.unwrap();
+                    log::info!("vp: {}", vp);
+                    tmk_assert!(vp == i, format!("vp should be equal to {}", i));
+
+                    let vtl = ctx.get_current_vtl();
+                    tmk_assert!(vtl.is_ok(), "vtl should be valid");
+
+                    let vtl = vtl.unwrap();
+                    log::info!("vtl: {:?}", vtl);
+                    tmk_assert!(vtl == Vtl::Vtl0, format!("vtl should be Vtl0 for VP {}", i));
+                    _ = tx.send(());
+                }));
+            tmk_assert!(result.is_ok(), "start_on_vp should succeed");
+            _ = rx.recv();
+        }
+    }
+
+    // WARN level so the completion marker shows up at the default log filter.
+    log::warn!("All VPs have been tested");
+}
diff --git a/opentmk/opentmk/src/uefi/tests/mod.rs b/opentmk/opentmk/src/uefi/tests/mod.rs
new file mode 100644
index 0000000000..75fc35f851
--- /dev/null
+++ b/opentmk/opentmk/src/uefi/tests/mod.rs
@@ -0,0 +1,13 @@
+#![allow(dead_code)]
+use super::hypvctx::HvTestCtx;
+
+mod hv_error_vp_start;
+mod hv_misc;
+mod hv_processor;
+
+pub fn run_test() {
+    let mut ctx = HvTestCtx::new();
+    ctx.init().expect("failed to init on BSP");
+    hv_processor::exec(&mut ctx);
+}
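The tests above all follow the same handshake: the calling VP queues a closure onto a target VP/VTL and blocks on a channel until that closure reports back. A minimal sketch of the pattern (editor's illustration, not part of the patch; `VpExecutor`, `start_on_vp`, and the trait names come from this PR's `context` module, which is not shown here):

use hvdef::Vtl;
use sync_nostd::Channel;
use crate::context::{VirtualProcessorPlatformTrait, VpExecutor, VtlPlatformTrait};

// Block the calling VP until `vp` has run the closure in VTL1 and acked.
fn check_vp<T>(ctx: &mut T, vp: u32)
where
    T: VirtualProcessorPlatformTrait + VtlPlatformTrait,
{
    let (tx, rx) = Channel::<u32>::new().split();
    ctx.start_on_vp(VpExecutor::new(vp, Vtl::Vtl1).command(move |ctx: &mut T| {
        // Runs on the target VP in VTL1; report our VP index back.
        let _ = tx.send(ctx.get_current_vp().unwrap());
    }))
    .expect("start_on_vp should succeed");
    // recv() spins until the target VP has executed the closure.
    assert_eq!(rx.recv().unwrap(), vp);
}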
diff --git a/opentmk/sync/Cargo.toml b/opentmk/sync/Cargo.toml
new file mode 100644
index 0000000000..53f9ba2ad6
--- /dev/null
+++ b/opentmk/sync/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "sync_nostd"
+version = "0.1.0"
+rust-version.workspace = true
+edition.workspace = true
+
+[dependencies]
+spin.workspace = true
+
+[lints]
+workspace = true
diff --git a/opentmk/sync/src/lib.rs b/opentmk/sync/src/lib.rs
new file mode 100644
index 0000000000..e3a387c02c
--- /dev/null
+++ b/opentmk/sync/src/lib.rs
@@ -0,0 +1,332 @@
+#![no_std]
+#![allow(unsafe_code)]
+extern crate alloc;
+use core::sync::atomic::{AtomicUsize, Ordering};
+pub use spin::{Mutex, MutexGuard};
+use alloc::{sync::Arc, vec::Vec};
+use alloc::collections::VecDeque;
+use core::error::Error;
+use core::fmt;
+
+/// An unbounded channel implementation with priority send capability.
+/// This implementation works in no_std environments using spin-rs.
+/// It uses a VecDeque as the underlying buffer and ensures non-blocking operations.
+pub struct Channel<T> {
+    inner: Arc<ChannelInner<T>>,
+}
+
+/// The inner data structure holding the channel state
+struct ChannelInner<T> {
+    /// The internal buffer using a VecDeque protected by its own mutex
+    buffer: Mutex<VecDeque<T>>,
+
+    /// Number of active senders
+    senders: AtomicUsize,
+
+    /// Number of active receivers
+    receivers: AtomicUsize,
+}
+
+// SAFETY: all shared state is behind the spin mutex and atomics; `T: Send`
+// is required so values can move across VPs.
+unsafe impl<T: Send> Send for ChannelInner<T> {}
+unsafe impl<T: Send> Sync for ChannelInner<T> {}
+
+/// Error type for sending operations
+#[derive(Debug, Eq, PartialEq)]
+pub enum SendError<T> {
+    /// All receivers have been dropped
+    Disconnected(T),
+}
+
+impl<T> fmt::Display for SendError<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            SendError::Disconnected(_) => write!(f, "send failed because receiver is disconnected"),
+        }
+    }
+}
+
+impl<T: fmt::Debug> Error for SendError<T> {}
+
+/// Error type for receiving operations
+#[derive(Debug, Eq, PartialEq)]
+pub enum RecvError {
+    /// Channel is empty
+    Empty,
+    /// All senders have been dropped
+    Disconnected,
+}
+
+impl fmt::Display for RecvError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            RecvError::Empty => write!(f, "receive failed because channel is empty"),
+            RecvError::Disconnected => write!(f, "receive failed because sender is disconnected"),
+        }
+    }
+}
+
+impl Error for RecvError {}
+
+/// Sender half of the channel
+pub struct Sender<T> {
+    inner: Arc<ChannelInner<T>>,
+}
+
+/// Receiver half of the channel
+pub struct Receiver<T> {
+    inner: Arc<ChannelInner<T>>,
+}
+
+impl<T> Clone for Sender<T> {
+    fn clone(&self) -> Self {
+        self.inner.senders.fetch_add(1, Ordering::SeqCst);
+        Sender {
+            inner: self.inner.clone(),
+        }
+    }
+}
+
+impl<T> Clone for Receiver<T> {
+    fn clone(&self) -> Self {
+        self.inner.receivers.fetch_add(1, Ordering::SeqCst);
+        Receiver {
+            inner: self.inner.clone(),
+        }
+    }
+}
+
+impl<T> Channel<T> {
+    /// Creates a new unbounded channel
+    pub fn new() -> Self {
+        let inner = Arc::new(ChannelInner {
+            buffer: Mutex::new(VecDeque::new()),
+            senders: AtomicUsize::new(1),   // Start with one sender
+            receivers: AtomicUsize::new(1), // Start with one receiver
+        });
+
+        Self { inner }
+    }
+
+    /// Splits the channel into a sender and receiver pair
+    pub fn split(self) -> (Sender<T>, Receiver<T>) {
+        let sender = Sender {
+            inner: self.inner.clone(),
+        };
+
+        let receiver = Receiver { inner: self.inner };
+
+        (sender, receiver)
+    }
+
+    /// Returns the current number of elements in the channel
+    pub fn len(&self) -> usize {
+        self.inner.buffer.lock().len()
+    }
+    /// Returns true if the channel is empty
+    pub fn is_empty(&self) -> bool {
+        self.inner.buffer.lock().is_empty()
+    }
+}
+
+impl<T> Sender<T> {
+    /// Sends an element to the back of the queue.
+    /// Returns Ok(()) if successful, Err(SendError) if all receivers have been dropped.
+    pub fn send(&self, value: T) -> Result<(), SendError<T>> {
+        // Check if there are any receivers left
+        if self.inner.receivers.load(Ordering::SeqCst) == 0 {
+            return Err(SendError::Disconnected(value));
+        }
+
+        // Lock the buffer - only locked during the actual send operation
+        let mut buffer = self.inner.buffer.lock();
+
+        // Check again after locking
+        if self.inner.receivers.load(Ordering::SeqCst) == 0 {
+            return Err(SendError::Disconnected(value));
+        }
+
+        // Push to the back of the queue - can't fail since we're unbounded
+        buffer.push_back(value);
+
+        Ok(())
+    }
+
+    /// Sends an element to the front of the queue (highest priority).
+    /// Returns Ok(()) if successful, Err(SendError) if all receivers have been dropped.
+    pub fn send_priority(&self, value: T) -> Result<(), SendError<T>> {
+        if self.inner.receivers.load(Ordering::SeqCst) == 0 {
+            return Err(SendError::Disconnected(value));
+        }
+
+        let mut buffer = self.inner.buffer.lock();
+
+        // Check again after locking
+        if self.inner.receivers.load(Ordering::SeqCst) == 0 {
+            return Err(SendError::Disconnected(value));
+        }
+
+        buffer.push_front(value);
+
+        Ok(())
+    }
+
+    /// Send a batch of elements at once.
+    /// Returns the number of elements successfully sent (all of them, unless disconnected).
+    pub fn send_batch<I>(&self, items: I) -> usize
+    where
+        I: IntoIterator<Item = T>,
+    {
+        if self.inner.receivers.load(Ordering::SeqCst) == 0 {
+            return 0;
+        }
+
+        // Lock the buffer once for the entire batch
+        let mut buffer = self.inner.buffer.lock();
+
+        // Check again after locking
+        if self.inner.receivers.load(Ordering::SeqCst) == 0 {
+            return 0;
+        }
+
+        let mut count = 0;
+        for item in items {
+            buffer.push_back(item);
+            count += 1;
+        }
+
+        count
+    }
+
+    /// Returns the current number of elements in the channel
+    pub fn len(&self) -> usize {
+        self.inner.buffer.lock().len()
+    }
+
+    /// Returns true if the channel is empty
+    pub fn is_empty(&self) -> bool {
+        self.inner.buffer.lock().is_empty()
+    }
+}
+
+impl<T> Receiver<T> {
+    /// Receives an element from the front of the queue, spinning until one
+    /// is available or all senders disconnect.
+    pub fn recv(&self) -> Result<T, RecvError> {
+        loop {
+            match self.try_recv() {
+                Ok(value) => return Ok(value),
+                // There is no scheduler to yield to in this environment, so
+                // spin and retry.
+                Err(RecvError::Empty) => continue,
+                Err(err) => return Err(err),
+            }
+        }
+    }
+
+    /// Tries to receive an element from the front of the queue without blocking.
+    /// Returns Ok(value) if successful, Err(RecvError) otherwise.
+    pub fn try_recv(&self) -> Result<T, RecvError> {
+        // Use a separate scope for the lock to ensure it's released promptly
+        let result = {
+            let mut buffer = self.inner.buffer.lock();
+            buffer.pop_front()
+        };
+
+        match result {
+            Some(val) => Ok(val),
+            None => {
+                // Check if there are any senders left
+                if self.inner.senders.load(Ordering::SeqCst) == 0 {
+                    Err(RecvError::Disconnected)
+                } else {
+                    Err(RecvError::Empty)
+                }
+            }
+        }
+    }
+
+    /// Tries to receive multiple elements at once, up to the specified limit.
+    /// Returns a vector of received elements.
+    pub fn recv_batch(&self, max_items: usize) -> Vec<T>
+    where
+        T: Send,
+    {
+        if max_items == 0 {
+            return Vec::new();
+        }
+
+        let mut items = Vec::new();
+
+        // Lock the buffer once for the entire batch
+        let mut buffer = self.inner.buffer.lock();
+
+        // Take at most `max_items`, bounded by what is queued
+        let count = max_items.min(buffer.len());
+        items.reserve(count);
+
+        for _ in 0..count {
+            if let Some(item) = buffer.pop_front() {
+                items.push(item);
+            } else {
+                // This shouldn't happen due to the min() above, but just in case
+                break;
+            }
+        }
+
+        items
+    }
+
+    /// Peeks at the next element without removing it
+    pub fn peek(&self) -> Option<T>
+    where
+        T: Clone,
+    {
+        let buffer = self.inner.buffer.lock();
+        buffer.front().cloned()
+    }
+
+    /// Returns the current number of elements in the channel
+    pub fn len(&self) -> usize {
+        self.inner.buffer.lock().len()
+    }
+
+    /// Returns true if the channel is empty
+    pub fn is_empty(&self) -> bool {
+        self.inner.buffer.lock().is_empty()
+    }
+}
+
+impl<T> Drop for Sender<T> {
+    fn drop(&mut self) {
+        self.inner.senders.fetch_sub(1, Ordering::SeqCst);
+    }
+}
+
+impl<T> Drop for Receiver<T> {
+    fn drop(&mut self) {
+        self.inner.receivers.fetch_sub(1, Ordering::SeqCst);
+    }
+}
+
+impl<T> Default for Channel<T> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
diff --git a/xtask/src/tasks/guest_test/uefi/gpt_efi_disk.rs b/xtask/src/tasks/guest_test/uefi/gpt_efi_disk.rs
index fb4641a93b..862db7e55c 100644
--- a/xtask/src/tasks/guest_test/uefi/gpt_efi_disk.rs
+++ b/xtask/src/tasks/guest_test/uefi/gpt_efi_disk.rs
@@ -22,7 +22,7 @@ pub fn create_gpt_efi_disk(out_img: &Path, with_files: &[(&Path, &Path)]) -> Res
     ));
     }
 
-    let disk_size = 1024 * 1024 * 32; // 32MB disk should be enough for our tests
+    let disk_size = 1024 * 1024 * 512; // bump to 512MB so the OpenTMK image fits
     let num_sectors = disk_size / SECTOR_SIZE;
     let mut disk = vec![0; num_sectors * SECTOR_SIZE];
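A quick usage sketch of the `sync_nostd` channel (editor's illustration; the API shown, `new`/`split`/`send`/`send_priority`/`try_recv`/`recv_batch`, is the one defined in lib.rs above):

let (tx, rx) = sync_nostd::Channel::<u32>::new().split();
tx.send(1).unwrap();
tx.send(2).unwrap();
tx.send_priority(0).unwrap(); // jumps to the front of the queue
assert_eq!(rx.try_recv(), Ok(0));
assert_eq!(rx.recv_batch(2), vec![1, 2]);
drop(tx);
assert_eq!(rx.try_recv(), Err(sync_nostd::RecvError::Disconnected));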