
Commit 3797009

cperciva and aljimenezb committed
pvh/arch-x86_64: Initialize vCPU regs for PVH
Set the initial values of the KVM vCPU registers as specified in the PVH
boot ABI: https://xenbits.xen.org/docs/unstable/misc/pvh.html

Add stub bits for aarch64; PVH mode does not exist there.

Signed-off-by: Colin Percival <cperciva@freebsd.org>
Co-authored-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
1 parent 8b622ce commit 3797009

7 files changed: 228 additions & 74 deletions


src/vmm/src/arch/x86_64/gdt.rs

Lines changed: 30 additions & 2 deletions
@@ -1,3 +1,5 @@
+// Copyright © 2020, Oracle and/or its affiliates.
+//
 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -24,8 +26,34 @@ fn get_base(entry: u64) -> u64 {
         | (((entry) & 0x0000_0000_FFFF_0000) >> 16)
 }
 
+// Extract the segment limit from the GDT segment descriptor.
+//
+// In a segment descriptor, the limit field is 20 bits, so it can directly describe
+// a range from 0 to 0xFFFFF (1 MB). When G flag is set (4-KByte page granularity) it
+// scales the value in the limit field by a factor of 2^12 (4 Kbytes), making the effective
+// limit range from 0xFFF (4 KBytes) to 0xFFFF_FFFF (4 GBytes).
+//
+// However, the limit field in the VMCS definition is a 32 bit field, and the limit value is not
+// automatically scaled using the G flag. This means that for a desired range of 4GB for a
+// given segment, its limit must be specified as 0xFFFF_FFFF. Therefore the method of obtaining
+// the limit from the GDT entry is not sufficient, since it only provides 20 bits when 32 bits
+// are necessary. Fortunately, we can check if the G flag is set when extracting the limit since
+// the full GDT entry is passed as an argument, and perform the scaling of the limit value to
+// return the full 32 bit value.
+//
+// The scaling mentioned above is required when using PVH boot, since the guest boots in protected
+// (32-bit) mode and must be able to access the entire 32-bit address space. It does not cause
+// issues for the case of direct boot to 64-bit (long) mode, since in 64-bit mode the processor does
+// not perform runtime limit checking on code or data segments.
 fn get_limit(entry: u64) -> u32 {
-    ((((entry) & 0x000F_0000_0000_0000) >> 32) | ((entry) & 0x0000_0000_0000_FFFF)) as u32
+    let limit: u32 =
+        ((((entry) & 0x000F_0000_0000_0000) >> 32) | ((entry) & 0x0000_0000_0000_FFFF)) as u32;
+
+    // Perform manual limit scaling if G flag is set
+    match get_g(entry) {
+        0 => limit,
+        _ => (limit << 12) | 0xFFF, // G flag is either 0 or 1
+    }
 }
 
 fn get_g(entry: u64) -> u8 {
@@ -109,7 +137,7 @@ mod tests {
         assert_eq!(0xB, seg.type_);
         // base and limit
         assert_eq!(0x10_0000, seg.base);
-        assert_eq!(0xfffff, seg.limit);
+        assert_eq!(0xffff_ffff, seg.limit);
         assert_eq!(0x0, seg.unusable);
     }
 }
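
The effect of the new get_limit() scaling can be checked by hand against the descriptors this commit introduces. The sketch below re-creates gdt_entry() and get_g() from the standard x86 descriptor layout purely for illustration (their in-tree definitions are not part of this diff and may differ in style), and mirrors the patched get_limit():

// Illustrative only: standalone re-creation of the gdt.rs helpers.
fn gdt_entry(flags: u16, base: u32, limit: u32) -> u64 {
    ((u64::from(base) & 0xff00_0000) << (56 - 24))
        | ((u64::from(flags) & 0x0000_f0ff) << 40)
        | ((u64::from(limit) & 0x000f_0000) << (48 - 16))
        | ((u64::from(base) & 0x00ff_ffff) << 16)
        | (u64::from(limit) & 0x0000_ffff)
}

// The G (granularity) flag is bit 55 of a segment descriptor.
fn get_g(entry: u64) -> u8 {
    ((entry & 0x0080_0000_0000_0000) >> 55) as u8
}

// Mirrors the patched get_limit(): scale the 20-bit limit when G is set.
fn get_limit(entry: u64) -> u32 {
    let limit = (((entry & 0x000F_0000_0000_0000) >> 32) | (entry & 0x0000_0000_0000_FFFF)) as u32;
    match get_g(entry) {
        0 => limit,
        _ => (limit << 12) | 0xFFF,
    }
}

fn main() {
    // PVH flat code segment: the stored limit field is 0xfffff with G = 1,
    // so the effective limit decodes back to 0xffff_ffff (4 GiB).
    assert_eq!(get_limit(gdt_entry(0xc09b, 0, 0xffff_ffff)), 0xffff_ffff);

    // PVH TSS descriptor: byte granularity, the 0x67 limit is returned unchanged.
    assert_eq!(get_limit(gdt_entry(0x008b, 0, 0x67)), 0x67);

    println!("limit scaling matches the updated unit tests");
}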

src/vmm/src/arch/x86_64/layout.rs

Lines changed: 3 additions & 0 deletions
@@ -27,5 +27,8 @@ pub const IRQ_MAX: u32 = 23;
 /// Address for the TSS setup.
 pub const KVM_TSS_ADDRESS: u64 = 0xfffb_d000;
 
+/// Address of the hvm_start_info struct used in PVH boot
+pub const PVH_INFO_START: u64 = 0x6000;
+
 /// The 'zero page', a.k.a linux kernel bootparams.
 pub const ZERO_PAGE_START: u64 = 0x7000;
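
For orientation, PVH_INFO_START is where the VMM places the hvm_start_info structure that a PVH guest finds through %ebx at entry. The sketch below follows version 0 of that struct as described in the Xen PVH ABI document linked in the commit message; it is illustrative only and not part of this diff (the VMM takes the real definition from its boot-protocol bindings).

/// Start-of-day structure located at PVH_INFO_START (0x6000), per the Xen PVH
/// ABI (struct version 0). Shown for illustration only.
#[repr(C)]
pub struct HvmStartInfo {
    pub magic: u32,         // Must be 0x336e_c578 ("xEn3" with the 0x80 bit of the "E" set)
    pub version: u32,       // Version of this structure
    pub flags: u32,         // SIF_xxx flags
    pub nr_modules: u32,    // Number of modules passed to the kernel
    pub modlist_paddr: u64, // Physical address of an array of hvm_modlist_entry
    pub cmdline_paddr: u64, // Physical address of the command line
    pub rsdp_paddr: u64,    // Physical address of the ACPI RSDP
}

pub const XEN_HVM_START_MAGIC_VALUE: u32 = 0x336e_c578;

fn main() {
    // With repr(C) the v0 struct occupies 40 bytes, comfortably inside the
    // 0x1000 bytes between PVH_INFO_START (0x6000) and ZERO_PAGE_START (0x7000).
    assert_eq!(std::mem::size_of::<HvmStartInfo>(), 40);
}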

src/vmm/src/arch/x86_64/regs.rs

Lines changed: 127 additions & 47 deletions
@@ -1,3 +1,4 @@
+// Copyright © 2020, Oracle and/or its affiliates.
 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -11,6 +12,7 @@ use kvm_bindings::{kvm_fpu, kvm_regs, kvm_sregs};
 use kvm_ioctls::VcpuFd;
 use utils::vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};
 
+use super::super::{BootProtocol, EntryPoint};
 use super::gdt::{gdt_entry, kvm_segment_from_gdt};
 
 // Initial pagetables.
@@ -99,20 +101,30 @@ impl fmt::Display for SetupRegistersError {
 /// # Errors
 ///
 /// When [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_regs`] errors.
-pub fn setup_regs(vcpu: &VcpuFd, boot_ip: u64) -> Result<(), SetupRegistersError> {
-    let regs: kvm_regs = kvm_regs {
-        rflags: 0x0000_0000_0000_0002u64,
-        rip: boot_ip,
-        // Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments are
-        // made to rsp (i.e. reserving space for local variables or pushing values on to the stack),
-        // local variables and function parameters are still accessible from a constant offset from
-        // rbp.
-        rsp: super::layout::BOOT_STACK_POINTER,
-        // Starting stack pointer.
-        rbp: super::layout::BOOT_STACK_POINTER,
-        // Must point to zero page address per Linux ABI. This is x86_64 specific.
-        rsi: super::layout::ZERO_PAGE_START,
-        ..Default::default()
+pub fn setup_regs(vcpu: &VcpuFd, entry_point: EntryPoint) -> Result<(), SetupRegistersError> {
+    let regs: kvm_regs = match entry_point.protocol {
+        BootProtocol::PvhBoot => kvm_regs {
+            // Configure regs as required by PVH boot protocol.
+            rflags: 0x0000_0000_0000_0002u64,
+            rbx: super::layout::PVH_INFO_START,
+            rip: entry_point.entry_addr.raw_value(),
+            ..Default::default()
+        },
+        BootProtocol::LinuxBoot => kvm_regs {
+            // Configure regs as required by Linux 64-bit boot protocol.
+            rflags: 0x0000_0000_0000_0002u64,
+            rip: entry_point.entry_addr.raw_value(),
+            // Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments
+            // are made to rsp (i.e. reserving space for local variables or pushing
+            // values on to the stack), local variables and function parameters are
+            // still accessible from a constant offset from rbp.
+            rsp: super::layout::BOOT_STACK_POINTER,
+            // Starting stack pointer.
+            rbp: super::layout::BOOT_STACK_POINTER,
+            // Must point to zero page address per Linux ABI. This is x86_64 specific.
+            rsi: super::layout::ZERO_PAGE_START,
+            ..Default::default()
+        },
     };
 
     vcpu.set_regs(&regs).map_err(SetupRegistersError)
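
A hypothetical call site for the PVH arm of the new setup_regs() is sketched below; it mirrors the unit test further down but selects BootProtocol::PvhBoot. The entry address is a placeholder (in a real boot it would come from the kernel's PVH ELF note), and the snippet assumes the imports already used in this module plus kvm_ioctls::Kvm.

use kvm_ioctls::Kvm;
use utils::vm_memory::GuestAddress;

fn pvh_regs_example() {
    // Requires /dev/kvm; mirrors the test setup in this file.
    let kvm = Kvm::new().unwrap();
    let vm = kvm.create_vm().unwrap();
    let vcpu = vm.create_vcpu(0).unwrap();

    let entry_point = EntryPoint {
        entry_addr: GuestAddress(0x100_0000), // placeholder entry address
        protocol: BootProtocol::PvhBoot,
    };

    // Resulting register file: RIP = entry point, RBX = PVH_INFO_START,
    // RFLAGS = 0x2, everything else left at its default of zero.
    setup_regs(&vcpu, entry_point).unwrap();
}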
@@ -141,6 +153,7 @@ pub enum SetupSpecialRegistersError {
 ///
 /// * `mem` - The memory that will be passed to the guest.
 /// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
+/// * `boot_prot` - The boot protocol being used.
 ///
 /// # Errors
 ///
@@ -149,14 +162,21 @@ pub enum SetupSpecialRegistersError {
 /// - [`configure_segments_and_sregs`] errors.
 /// - [`setup_page_tables`] errors
 /// - [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_sregs`] errors.
-pub fn setup_sregs(mem: &GuestMemoryMmap, vcpu: &VcpuFd) -> Result<(), SetupSpecialRegistersError> {
+pub fn setup_sregs(
+    mem: &GuestMemoryMmap,
+    vcpu: &VcpuFd,
+    boot_prot: BootProtocol,
+) -> Result<(), SetupSpecialRegistersError> {
     let mut sregs: kvm_sregs = vcpu
         .get_sregs()
         .map_err(SetupSpecialRegistersError::GetSpecialRegisters)?;
 
-    configure_segments_and_sregs(mem, &mut sregs)
+    configure_segments_and_sregs(mem, &mut sregs, boot_prot)
         .map_err(SetupSpecialRegistersError::ConfigureSegmentsAndSpecialRegisters)?;
-    setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?; // TODO(dgreid) - Can this be done once per system instead?
+    if let BootProtocol::LinuxBoot = boot_prot {
+        setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?;
+        // TODO(dgreid) - Can this be done once per system instead?
+    }
 
     vcpu.set_sregs(&sregs)
         .map_err(SetupSpecialRegistersError::SetSpecialRegisters)
@@ -171,6 +191,7 @@ const EFER_LMA: u64 = 0x400;
 const EFER_LME: u64 = 0x100;
 
 const X86_CR0_PE: u64 = 0x1;
+const X86_CR0_ET: u64 = 0x10;
 const X86_CR0_PG: u64 = 0x8000_0000;
 const X86_CR4_PAE: u64 = 0x20;
 
@@ -197,13 +218,28 @@ fn write_idt_value(val: u64, guest_mem: &GuestMemoryMmap) -> Result<(), RegsError
 fn configure_segments_and_sregs(
     mem: &GuestMemoryMmap,
     sregs: &mut kvm_sregs,
+    boot_prot: BootProtocol,
 ) -> Result<(), RegsError> {
-    let gdt_table: [u64; BOOT_GDT_MAX] = [
-        gdt_entry(0, 0, 0),            // NULL
-        gdt_entry(0xa09b, 0, 0xfffff), // CODE
-        gdt_entry(0xc093, 0, 0xfffff), // DATA
-        gdt_entry(0x808b, 0, 0xfffff), // TSS
-    ];
+    let gdt_table: [u64; BOOT_GDT_MAX] = match boot_prot {
+        BootProtocol::PvhBoot => {
+            // Configure GDT entries as specified by PVH boot protocol
+            [
+                gdt_entry(0, 0, 0),                // NULL
+                gdt_entry(0xc09b, 0, 0xffff_ffff), // CODE
+                gdt_entry(0xc093, 0, 0xffff_ffff), // DATA
+                gdt_entry(0x008b, 0, 0x67),        // TSS
+            ]
+        }
+        BootProtocol::LinuxBoot => {
+            // Configure GDT entries as specified by Linux 64bit boot protocol
+            [
+                gdt_entry(0, 0, 0),            // NULL
+                gdt_entry(0xa09b, 0, 0xfffff), // CODE
+                gdt_entry(0xc093, 0, 0xfffff), // DATA
+                gdt_entry(0x808b, 0, 0xfffff), // TSS
+            ]
+        }
+    };
 
     let code_seg = kvm_segment_from_gdt(gdt_table[1], 1);
     let data_seg = kvm_segment_from_gdt(gdt_table[2], 2);
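
The raw value the PVH CODE entry above encodes to (0x00cf_9b00_0000_ffff, as asserted in the updated tests) can be decoded by hand to confirm the guest starts in a flat, ring-0, 32-bit code segment. A small illustrative check:

fn main() {
    let code: u64 = 0x00cf_9b00_0000_ffff; // gdt_entry(0xc09b, 0, 0xffff_ffff)

    let type_ = (code >> 40) & 0xf; // 0xB: execute/read code segment, accessed
    let s     = (code >> 44) & 0x1; // 1: code/data (non-system) descriptor
    let dpl   = (code >> 45) & 0x3; // 0: ring 0
    let p     = (code >> 47) & 0x1; // 1: present
    let l     = (code >> 53) & 0x1; // 0: not a 64-bit code segment
    let db    = (code >> 54) & 0x1; // 1: 32-bit default operand/address size
    let g     = (code >> 55) & 0x1; // 1: 4-KByte granularity (limit scales to 4 GiB)

    assert_eq!((type_, s, dpl, p, l, db, g), (0xb, 1, 0, 1, 0, 1, 1));
}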
@@ -226,9 +262,17 @@
     sregs.ss = data_seg;
     sregs.tr = tss_seg;
 
-    // 64-bit protected mode
-    sregs.cr0 |= X86_CR0_PE;
-    sregs.efer |= EFER_LME | EFER_LMA;
+    match boot_prot {
+        BootProtocol::PvhBoot => {
+            sregs.cr0 = X86_CR0_PE | X86_CR0_ET;
+            sregs.cr4 = 0;
+        }
+        BootProtocol::LinuxBoot => {
+            // 64-bit protected mode
+            sregs.cr0 |= X86_CR0_PE;
+            sregs.efer |= EFER_LME | EFER_LMA;
+        }
+    }
 
     Ok(())
 }
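
Numerically, the PVH arm leaves the guest in 32-bit protected mode with paging disabled: CR0 = PE | ET = 0x11, CR4 = 0, and EFER is untouched, which is why setup_sregs() only builds identity-mapped page tables for the LinuxBoot path. A trivial check of those constants (mirroring the ones defined earlier in regs.rs):

const X86_CR0_PE: u64 = 0x1;
const X86_CR0_ET: u64 = 0x10;
const X86_CR0_PG: u64 = 0x8000_0000;

fn main() {
    let pvh_cr0 = X86_CR0_PE | X86_CR0_ET;
    assert_eq!(pvh_cr0, 0x11);
    assert_eq!(pvh_cr0 & X86_CR0_PG, 0); // paging stays off for PVH
}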
@@ -289,24 +333,45 @@ mod tests {
         gm.read_obj(read_addr).unwrap()
     }
 
-    fn validate_segments_and_sregs(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
+    fn validate_segments_and_sregs(
+        gm: &GuestMemoryMmap,
+        sregs: &kvm_sregs,
+        boot_prot: BootProtocol,
+    ) {
+        if let BootProtocol::LinuxBoot = boot_prot {
+            assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
+            assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
+            assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
+
+            assert_eq!(0xffff_ffff, sregs.tr.limit);
+
+            assert!(sregs.cr0 & X86_CR0_PE != 0);
+            assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
+        } else {
+            // Validate values that are specific to PVH boot protocol
+            assert_eq!(0xcf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
+            assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
+            assert_eq!(0x00_8b00_0000_0067, read_u64(gm, BOOT_GDT_OFFSET + 24));
+
+            assert_eq!(0x67, sregs.tr.limit);
+            assert_eq!(0, sregs.tr.g);
+
+            assert!(sregs.cr0 & X86_CR0_PE != 0 && sregs.cr0 & X86_CR0_ET != 0);
+            assert_eq!(0, sregs.cr4);
+        }
+
+        // Common settings for both PVH and Linux boot protocol
         assert_eq!(0x0, read_u64(gm, BOOT_GDT_OFFSET));
-        assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
-        assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
-        assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
         assert_eq!(0x0, read_u64(gm, BOOT_IDT_OFFSET));
 
         assert_eq!(0, sregs.cs.base);
-        assert_eq!(0xfffff, sregs.ds.limit);
+        assert_eq!(0xffff_ffff, sregs.ds.limit);
         assert_eq!(0x10, sregs.es.selector);
         assert_eq!(1, sregs.fs.present);
         assert_eq!(1, sregs.gs.g);
         assert_eq!(0, sregs.ss.avl);
         assert_eq!(0, sregs.tr.base);
-        assert_eq!(0xfffff, sregs.tr.limit);
         assert_eq!(0, sregs.tr.avl);
-        assert!(sregs.cr0 & X86_CR0_PE != 0);
-        assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
     }
 
     fn validate_page_tables(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
@@ -358,7 +423,12 @@ mod tests {
             ..Default::default()
         };
 
-        setup_regs(&vcpu, expected_regs.rip).unwrap();
+        let entry_point: EntryPoint = EntryPoint {
+            entry_addr: GuestAddress(expected_regs.rip),
+            protocol: BootProtocol::LinuxBoot,
+        };
+
+        setup_regs(&vcpu, entry_point).unwrap();
 
         let actual_regs: kvm_regs = vcpu.get_regs().unwrap();
         assert_eq!(actual_regs, expected_regs);
@@ -371,16 +441,22 @@
         let vcpu = vm.create_vcpu(0).unwrap();
         let gm = create_guest_mem(None);
 
-        assert!(vcpu.set_sregs(&Default::default()).is_ok());
-        setup_sregs(&gm, &vcpu).unwrap();
-
-        let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
-        // for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
-        // We set it to 1, otherwise the test will fail.
-        sregs.gs.g = 1;
-
-        validate_segments_and_sregs(&gm, &sregs);
-        validate_page_tables(&gm, &sregs);
+        [BootProtocol::LinuxBoot, BootProtocol::PvhBoot]
+            .iter()
+            .for_each(|boot_prot| {
+                assert!(vcpu.set_sregs(&Default::default()).is_ok());
+                setup_sregs(&gm, &vcpu, *boot_prot).unwrap();
+
+                let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
+                // for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
+                // We set it to 1, otherwise the test will fail.
+                sregs.gs.g = 1;
+
+                validate_segments_and_sregs(&gm, &sregs, *boot_prot);
+                if let BootProtocol::LinuxBoot = *boot_prot {
+                    validate_page_tables(&gm, &sregs);
+                }
+            });
     }
 
     #[test]
@@ -425,9 +501,13 @@
     fn test_configure_segments_and_sregs() {
         let mut sregs: kvm_sregs = Default::default();
         let gm = create_guest_mem(None);
-        configure_segments_and_sregs(&gm, &mut sregs).unwrap();
+        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::LinuxBoot).unwrap();
+
+        validate_segments_and_sregs(&gm, &sregs, BootProtocol::LinuxBoot);
+
+        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::PvhBoot).unwrap();
 
-        validate_segments_and_sregs(&gm, &sregs);
+        validate_segments_and_sregs(&gm, &sregs, BootProtocol::PvhBoot);
     }
 
     #[test]

src/vmm/src/builder.rs

Lines changed: 3 additions & 3 deletions
@@ -312,7 +312,7 @@ pub fn build_microvm_for_boot(
         &vmm,
         vcpus.as_mut(),
         &vm_resources.vm_config,
-        entry_point.entry_addr,
+        entry_point,
         &initrd,
         boot_cmdline,
     )?;
@@ -764,7 +764,7 @@ pub fn configure_system_for_boot(
     vmm: &Vmm,
     vcpus: &mut [Vcpu],
     vm_config: &VmConfig,
-    entry_addr: GuestAddress,
+    entry_point: EntryPoint,
     initrd: &Option<InitrdConfig>,
     boot_cmdline: LoaderKernelCmdline,
 ) -> Result<(), StartMicrovmError> {
@@ -808,7 +808,7 @@ pub fn configure_system_for_boot(
     // Configure vCPUs with normalizing and setting the generated CPU configuration.
     for vcpu in vcpus.iter_mut() {
         vcpu.kvm_vcpu
-            .configure(vmm.guest_memory(), entry_addr, &vcpu_config)
+            .configure(vmm.guest_memory(), entry_point, &vcpu_config)
             .map_err(VmmError::VcpuConfigure)
             .map_err(Internal)?;
     }
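
builder.rs now threads the full EntryPoint (address plus boot protocol) down to vCPU configuration instead of a bare GuestAddress. How the protocol gets chosen is outside this diff, but a hypothetical helper shaped like the following illustrates the intended flow (the function name and the PVH-note lookup are assumptions, not code from this commit):

use utils::vm_memory::GuestAddress;

// Hypothetical: pick the boot protocol based on whether the loaded kernel
// advertised a PVH entry point (e.g. via the XEN_ELFNOTE_PHYS32_ENTRY note).
fn choose_entry_point(pvh_entry: Option<GuestAddress>, elf_entry: GuestAddress) -> EntryPoint {
    match pvh_entry {
        Some(addr) => EntryPoint {
            entry_addr: addr,
            protocol: BootProtocol::PvhBoot,
        },
        None => EntryPoint {
            entry_addr: elf_entry,
            protocol: BootProtocol::LinuxBoot,
        },
    }
}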
