Skip to content

Commit d294709

Browse files
cperciva and aljimenezb
authored and committed
pvh/arch-x86_64: Initialize vCPU regs for PVH
Set the initial values of the KVM vCPU registers as specified in the PVH boot ABI: https://xenbits.xen.org/docs/unstable/misc/pvh.html

Add stub bits for aarch64; PVH mode does not exist there.

Signed-off-by: Colin Percival <cperciva@freebsd.org>
Co-authored-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
1 parent c14c2c6 commit d294709

File tree

7 files changed

+228
-74
lines changed

7 files changed

+228
-74
lines changed

src/vmm/src/arch/x86_64/gdt.rs

Lines changed: 33 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
// Copyright © 2020, Oracle and/or its affiliates.
2+
//
13
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
24
// SPDX-License-Identifier: Apache-2.0
35
//
@@ -24,8 +26,37 @@ fn get_base(entry: u64) -> u64 {
2426
| (((entry) & 0x0000_0000_FFFF_0000) >> 16)
2527
}
2628

29+
// Extract the segment limit from the GDT segment descriptor.
30+
//
31+
// In a segment descriptor, the limit field is 20 bits, so it can directly describe
32+
// a range from 0 to 0xFFFFF (1 MB). When G flag is set (4-KByte page granularity) it
33+
// scales the value in the limit field by a factor of 2^12 (4 Kbytes), making the effective
34+
// limit range from 0xFFF (4 KBytes) to 0xFFFF_FFFF (4 GBytes).
35+
//
36+
// However, the limit field in the VMCS definition is a 32 bit field, and the limit value is not
37+
// automatically scaled using the G flag. This means that for a desired range of 4GB for a
38+
// given segment, its limit must be specified as 0xFFFF_FFFF. Therefore the method of obtaining
39+
// the limit from the GDT entry is not sufficient, since it only provides 20 bits when 32 bits
40+
// are necessary. Fortunately, we can check if the G flag is set when extracting the limit since
41+
// the full GDT entry is passed as an argument, and perform the scaling of the limit value to
42+
// return the full 32 bit value.
43+
//
44+
// The scaling mentioned above is required when using PVH boot, since the guest boots in protected
45+
// (32-bit) mode and must be able to access the entire 32-bit address space. It does not cause
46+
// issues for the case of direct boot to 64-bit (long) mode, since in 64-bit mode the processor does
47+
// not perform runtime limit checking on code or data segments.
48+
//
49+
// (For more information concerning the formats of segment descriptors, VMCS fields, et cetera,
50+
// please consult the Intel Software Developer Manual.)
2751
fn get_limit(entry: u64) -> u32 {
28-
((((entry) & 0x000F_0000_0000_0000) >> 32) | ((entry) & 0x0000_0000_0000_FFFF)) as u32
52+
let limit: u32 =
53+
((((entry) & 0x000F_0000_0000_0000) >> 32) | ((entry) & 0x0000_0000_0000_FFFF)) as u32;
54+
55+
// Perform manual limit scaling if G flag is set
56+
match get_g(entry) {
57+
0 => limit,
58+
_ => (limit << 12) | 0xFFF, // G flag is either 0 or 1
59+
}
2960
}
3061

3162
fn get_g(entry: u64) -> u8 {
@@ -109,7 +140,7 @@ mod tests {
109140
assert_eq!(0xB, seg.type_);
110141
// base and limit
111142
assert_eq!(0x10_0000, seg.base);
112-
assert_eq!(0xfffff, seg.limit);
143+
assert_eq!(0xffff_ffff, seg.limit);
113144
assert_eq!(0x0, seg.unusable);
114145
}
115146
}

src/vmm/src/arch/x86_64/layout.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,5 +27,8 @@ pub const IRQ_MAX: u32 = 23;
2727
/// Address for the TSS setup.
2828
pub const KVM_TSS_ADDRESS: u64 = 0xfffb_d000;
2929

30+
/// Address of the hvm_start_info struct used in PVH boot
31+
pub const PVH_INFO_START: u64 = 0x6000;
32+
3033
/// The 'zero page', a.k.a linux kernel bootparams.
3134
pub const ZERO_PAGE_START: u64 = 0x7000;

src/vmm/src/arch/x86_64/regs.rs

Lines changed: 127 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
// Copyright © 2020, Oracle and/or its affiliates.
12
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
23
// SPDX-License-Identifier: Apache-2.0
34
//
@@ -11,6 +12,7 @@ use kvm_bindings::{kvm_fpu, kvm_regs, kvm_sregs};
1112
use kvm_ioctls::VcpuFd;
1213
use utils::vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};
1314

15+
use super::super::{BootProtocol, EntryPoint};
1416
use super::gdt::{gdt_entry, kvm_segment_from_gdt};
1517

1618
// Initial pagetables.
@@ -89,20 +91,30 @@ pub struct SetupRegistersError(utils::errno::Error);
8991
/// # Errors
9092
///
9193
/// When [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_regs`] errors.
92-
pub fn setup_regs(vcpu: &VcpuFd, boot_ip: u64) -> Result<(), SetupRegistersError> {
93-
let regs: kvm_regs = kvm_regs {
94-
rflags: 0x0000_0000_0000_0002u64,
95-
rip: boot_ip,
96-
// Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments are
97-
// made to rsp (i.e. reserving space for local variables or pushing values on to the stack),
98-
// local variables and function parameters are still accessible from a constant offset from
99-
// rbp.
100-
rsp: super::layout::BOOT_STACK_POINTER,
101-
// Starting stack pointer.
102-
rbp: super::layout::BOOT_STACK_POINTER,
103-
// Must point to zero page address per Linux ABI. This is x86_64 specific.
104-
rsi: super::layout::ZERO_PAGE_START,
105-
..Default::default()
94+
pub fn setup_regs(vcpu: &VcpuFd, entry_point: EntryPoint) -> Result<(), SetupRegistersError> {
95+
let regs: kvm_regs = match entry_point.protocol {
96+
BootProtocol::PvhBoot => kvm_regs {
97+
// Configure regs as required by PVH boot protocol.
98+
rflags: 0x0000_0000_0000_0002u64,
99+
rbx: super::layout::PVH_INFO_START,
100+
rip: entry_point.entry_addr.raw_value(),
101+
..Default::default()
102+
},
103+
BootProtocol::LinuxBoot => kvm_regs {
104+
// Configure regs as required by Linux 64-bit boot protocol.
105+
rflags: 0x0000_0000_0000_0002u64,
106+
rip: entry_point.entry_addr.raw_value(),
107+
// Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments
108+
// are made to rsp (i.e. reserving space for local variables or pushing
109+
// values on to the stack), local variables and function parameters are
110+
// still accessible from a constant offset from rbp.
111+
rsp: super::layout::BOOT_STACK_POINTER,
112+
// Starting stack pointer.
113+
rbp: super::layout::BOOT_STACK_POINTER,
114+
// Must point to zero page address per Linux ABI. This is x86_64 specific.
115+
rsi: super::layout::ZERO_PAGE_START,
116+
..Default::default()
117+
},
106118
};
107119

108120
vcpu.set_regs(&regs).map_err(SetupRegistersError)
@@ -131,6 +143,7 @@ pub enum SetupSpecialRegistersError {
131143
///
132144
/// * `mem` - The memory that will be passed to the guest.
133145
/// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
146+
/// * `boot_prot` - The boot protocol being used.
134147
///
135148
/// # Errors
136149
///
@@ -139,14 +152,21 @@ pub enum SetupSpecialRegistersError {
139152
/// - [`configure_segments_and_sregs`] errors.
140153
/// - [`setup_page_tables`] errors
141154
/// - [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_sregs`] errors.
142-
pub fn setup_sregs(mem: &GuestMemoryMmap, vcpu: &VcpuFd) -> Result<(), SetupSpecialRegistersError> {
155+
pub fn setup_sregs(
156+
mem: &GuestMemoryMmap,
157+
vcpu: &VcpuFd,
158+
boot_prot: BootProtocol,
159+
) -> Result<(), SetupSpecialRegistersError> {
143160
let mut sregs: kvm_sregs = vcpu
144161
.get_sregs()
145162
.map_err(SetupSpecialRegistersError::GetSpecialRegisters)?;
146163

147-
configure_segments_and_sregs(mem, &mut sregs)
164+
configure_segments_and_sregs(mem, &mut sregs, boot_prot)
148165
.map_err(SetupSpecialRegistersError::ConfigureSegmentsAndSpecialRegisters)?;
149-
setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?; // TODO(dgreid) - Can this be done once per system instead?
166+
if let BootProtocol::LinuxBoot = boot_prot {
167+
setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?;
168+
// TODO(dgreid) - Can this be done once per system instead?
169+
}
150170

151171
vcpu.set_sregs(&sregs)
152172
.map_err(SetupSpecialRegistersError::SetSpecialRegisters)
@@ -161,6 +181,7 @@ const EFER_LMA: u64 = 0x400;
161181
const EFER_LME: u64 = 0x100;
162182

163183
const X86_CR0_PE: u64 = 0x1;
184+
const X86_CR0_ET: u64 = 0x10;
164185
const X86_CR0_PG: u64 = 0x8000_0000;
165186
const X86_CR4_PAE: u64 = 0x20;
166187

@@ -187,13 +208,28 @@ fn write_idt_value(val: u64, guest_mem: &GuestMemoryMmap) -> Result<(), RegsErro
187208
fn configure_segments_and_sregs(
188209
mem: &GuestMemoryMmap,
189210
sregs: &mut kvm_sregs,
211+
boot_prot: BootProtocol,
190212
) -> Result<(), RegsError> {
191-
let gdt_table: [u64; BOOT_GDT_MAX] = [
192-
gdt_entry(0, 0, 0), // NULL
193-
gdt_entry(0xa09b, 0, 0xfffff), // CODE
194-
gdt_entry(0xc093, 0, 0xfffff), // DATA
195-
gdt_entry(0x808b, 0, 0xfffff), // TSS
196-
];
213+
let gdt_table: [u64; BOOT_GDT_MAX] = match boot_prot {
214+
BootProtocol::PvhBoot => {
215+
// Configure GDT entries as specified by PVH boot protocol
216+
[
217+
gdt_entry(0, 0, 0), // NULL
218+
gdt_entry(0xc09b, 0, 0xffff_ffff), // CODE
219+
gdt_entry(0xc093, 0, 0xffff_ffff), // DATA
220+
gdt_entry(0x008b, 0, 0x67), // TSS
221+
]
222+
}
223+
BootProtocol::LinuxBoot => {
224+
// Configure GDT entries as specified by Linux 64bit boot protocol
225+
[
226+
gdt_entry(0, 0, 0), // NULL
227+
gdt_entry(0xa09b, 0, 0xfffff), // CODE
228+
gdt_entry(0xc093, 0, 0xfffff), // DATA
229+
gdt_entry(0x808b, 0, 0xfffff), // TSS
230+
]
231+
}
232+
};
197233

198234
let code_seg = kvm_segment_from_gdt(gdt_table[1], 1);
199235
let data_seg = kvm_segment_from_gdt(gdt_table[2], 2);
@@ -216,9 +252,17 @@ fn configure_segments_and_sregs(
216252
sregs.ss = data_seg;
217253
sregs.tr = tss_seg;
218254

219-
// 64-bit protected mode
220-
sregs.cr0 |= X86_CR0_PE;
221-
sregs.efer |= EFER_LME | EFER_LMA;
255+
match boot_prot {
256+
BootProtocol::PvhBoot => {
257+
sregs.cr0 = X86_CR0_PE | X86_CR0_ET;
258+
sregs.cr4 = 0;
259+
}
260+
BootProtocol::LinuxBoot => {
261+
// 64-bit protected mode
262+
sregs.cr0 |= X86_CR0_PE;
263+
sregs.efer |= EFER_LME | EFER_LMA;
264+
}
265+
}
222266

223267
Ok(())
224268
}
@@ -279,24 +323,45 @@ mod tests {
279323
gm.read_obj(read_addr).unwrap()
280324
}
281325

282-
fn validate_segments_and_sregs(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
326+
fn validate_segments_and_sregs(
327+
gm: &GuestMemoryMmap,
328+
sregs: &kvm_sregs,
329+
boot_prot: BootProtocol,
330+
) {
331+
if let BootProtocol::LinuxBoot = boot_prot {
332+
assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
333+
assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
334+
assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
335+
336+
assert_eq!(0xffff_ffff, sregs.tr.limit);
337+
338+
assert!(sregs.cr0 & X86_CR0_PE != 0);
339+
assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
340+
} else {
341+
// Validate values that are specific to PVH boot protocol
342+
assert_eq!(0xcf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
343+
assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
344+
assert_eq!(0x00_8b00_0000_0067, read_u64(gm, BOOT_GDT_OFFSET + 24));
345+
346+
assert_eq!(0x67, sregs.tr.limit);
347+
assert_eq!(0, sregs.tr.g);
348+
349+
assert!(sregs.cr0 & X86_CR0_PE != 0 && sregs.cr0 & X86_CR0_ET != 0);
350+
assert_eq!(0, sregs.cr4);
351+
}
352+
353+
// Common settings for both PVH and Linux boot protocol
283354
assert_eq!(0x0, read_u64(gm, BOOT_GDT_OFFSET));
284-
assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
285-
assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
286-
assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
287355
assert_eq!(0x0, read_u64(gm, BOOT_IDT_OFFSET));
288356

289357
assert_eq!(0, sregs.cs.base);
290-
assert_eq!(0xfffff, sregs.ds.limit);
358+
assert_eq!(0xffff_ffff, sregs.ds.limit);
291359
assert_eq!(0x10, sregs.es.selector);
292360
assert_eq!(1, sregs.fs.present);
293361
assert_eq!(1, sregs.gs.g);
294362
assert_eq!(0, sregs.ss.avl);
295363
assert_eq!(0, sregs.tr.base);
296-
assert_eq!(0xfffff, sregs.tr.limit);
297364
assert_eq!(0, sregs.tr.avl);
298-
assert!(sregs.cr0 & X86_CR0_PE != 0);
299-
assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
300365
}
301366

302367
fn validate_page_tables(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
@@ -348,7 +413,12 @@ mod tests {
348413
..Default::default()
349414
};
350415

351-
setup_regs(&vcpu, expected_regs.rip).unwrap();
416+
let entry_point: EntryPoint = EntryPoint {
417+
entry_addr: GuestAddress(expected_regs.rip),
418+
protocol: BootProtocol::LinuxBoot,
419+
};
420+
421+
setup_regs(&vcpu, entry_point).unwrap();
352422

353423
let actual_regs: kvm_regs = vcpu.get_regs().unwrap();
354424
assert_eq!(actual_regs, expected_regs);
@@ -361,16 +431,22 @@ mod tests {
361431
let vcpu = vm.create_vcpu(0).unwrap();
362432
let gm = create_guest_mem(None);
363433

364-
assert!(vcpu.set_sregs(&Default::default()).is_ok());
365-
setup_sregs(&gm, &vcpu).unwrap();
366-
367-
let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
368-
// for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
369-
// We set it to 1, otherwise the test will fail.
370-
sregs.gs.g = 1;
371-
372-
validate_segments_and_sregs(&gm, &sregs);
373-
validate_page_tables(&gm, &sregs);
434+
[BootProtocol::LinuxBoot, BootProtocol::PvhBoot]
435+
.iter()
436+
.for_each(|boot_prot| {
437+
assert!(vcpu.set_sregs(&Default::default()).is_ok());
438+
setup_sregs(&gm, &vcpu, *boot_prot).unwrap();
439+
440+
let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
441+
// for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
442+
// We set it to 1, otherwise the test will fail.
443+
sregs.gs.g = 1;
444+
445+
validate_segments_and_sregs(&gm, &sregs, *boot_prot);
446+
if let BootProtocol::LinuxBoot = *boot_prot {
447+
validate_page_tables(&gm, &sregs);
448+
}
449+
});
374450
}
375451

376452
#[test]
@@ -415,9 +491,13 @@ mod tests {
415491
fn test_configure_segments_and_sregs() {
416492
let mut sregs: kvm_sregs = Default::default();
417493
let gm = create_guest_mem(None);
418-
configure_segments_and_sregs(&gm, &mut sregs).unwrap();
494+
configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::LinuxBoot).unwrap();
495+
496+
validate_segments_and_sregs(&gm, &sregs, BootProtocol::LinuxBoot);
497+
498+
configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::PvhBoot).unwrap();
419499

420-
validate_segments_and_sregs(&gm, &sregs);
500+
validate_segments_and_sregs(&gm, &sregs, BootProtocol::PvhBoot);
421501
}
422502

423503
#[test]

src/vmm/src/builder.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -315,7 +315,7 @@ pub fn build_microvm_for_boot(
315315
&vmm,
316316
vcpus.as_mut(),
317317
&vm_resources.vm_config,
318-
entry_point.entry_addr,
318+
entry_point,
319319
&initrd,
320320
boot_cmdline,
321321
)?;
@@ -776,7 +776,7 @@ pub fn configure_system_for_boot(
776776
vmm: &Vmm,
777777
vcpus: &mut [Vcpu],
778778
vm_config: &VmConfig,
779-
entry_addr: GuestAddress,
779+
entry_point: EntryPoint,
780780
initrd: &Option<InitrdConfig>,
781781
boot_cmdline: LoaderKernelCmdline,
782782
) -> Result<(), StartMicrovmError> {
@@ -820,7 +820,7 @@ pub fn configure_system_for_boot(
820820
// Configure vCPUs with normalizing and setting the generated CPU configuration.
821821
for vcpu in vcpus.iter_mut() {
822822
vcpu.kvm_vcpu
823-
.configure(vmm.guest_memory(), entry_addr, &vcpu_config)
823+
.configure(vmm.guest_memory(), entry_point, &vcpu_config)
824824
.map_err(VmmError::VcpuConfigure)
825825
.map_err(Internal)?;
826826
}

0 commit comments

Comments
 (0)