+// Copyright © 2020, Oracle and/or its affiliates.
 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -11,6 +12,7 @@ use kvm_bindings::{kvm_fpu, kvm_regs, kvm_sregs};
 use kvm_ioctls::VcpuFd;
 use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};

+use super::super::{BootProtocol, EntryPoint};
 use super::gdt::{gdt_entry, kvm_segment_from_gdt};

 // Initial pagetables.
@@ -101,20 +103,33 @@ impl fmt::Display for SetupRegistersError {
 /// # Errors
 ///
 /// When [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_regs`] errors.
-pub fn setup_regs(vcpu: &VcpuFd, boot_ip: u64) -> std::result::Result<(), SetupRegistersError> {
-    let regs: kvm_regs = kvm_regs {
-        rflags: 0x0000_0000_0000_0002u64,
-        rip: boot_ip,
-        // Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments are
-        // made to rsp (i.e. reserving space for local variables or pushing values on to the stack),
-        // local variables and function parameters are still accessible from a constant offset from
-        // rbp.
-        rsp: super::layout::BOOT_STACK_POINTER as u64,
-        // Starting stack pointer.
-        rbp: super::layout::BOOT_STACK_POINTER as u64,
-        // Must point to zero page address per Linux ABI. This is x86_64 specific.
-        rsi: super::layout::ZERO_PAGE_START as u64,
-        ..Default::default()
+pub fn setup_regs(
+    vcpu: &VcpuFd,
+    entry_point: EntryPoint,
+) -> std::result::Result<(), SetupRegistersError> {
+    let regs: kvm_regs = match entry_point.protocol {
+        BootProtocol::PvhBoot => kvm_regs {
+            // Configure regs as required by PVH boot protocol.
+            rflags: 0x0000_0000_0000_0002u64,
+            rbx: super::layout::PVH_INFO_START,
+            rip: entry_point.entry_addr.raw_value(),
+            ..Default::default()
+        },
+        BootProtocol::LinuxBoot => kvm_regs {
+            // Configure regs as required by Linux 64-bit boot protocol.
+            rflags: 0x0000_0000_0000_0002u64,
+            rip: entry_point.entry_addr.raw_value(),
+            // Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments
+            // are made to rsp (i.e. reserving space for local variables or pushing
+            // values on to the stack), local variables and function parameters are
+            // still accessible from a constant offset from rbp.
+            rsp: super::layout::BOOT_STACK_POINTER as u64,
+            // Starting stack pointer.
+            rbp: super::layout::BOOT_STACK_POINTER as u64,
+            // Must point to zero page address per Linux ABI. This is x86_64 specific.
+            rsi: super::layout::ZERO_PAGE_START as u64,
+            ..Default::default()
+        },
     };

     vcpu.set_regs(&regs).map_err(SetupRegistersError)
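
Note: a sketch of how a caller would use the reworked signature. The `vcpu` handle and the entry address are assumed here; in practice the address comes from the kernel loader (for PVH, typically from the kernel's XEN_ELFNOTE_PHYS32_ENTRY note), not the made-up constant below.

    let entry_point = EntryPoint {
        // Hypothetical entry address, normally produced by the loader.
        entry_addr: GuestAddress(0x0100_0000),
        // With PvhBoot the guest finds its start-info structure via rbx
        // (PVH_INFO_START above); with LinuxBoot, rsi points at the zero page.
        protocol: BootProtocol::PvhBoot,
    };
    setup_regs(&vcpu, entry_point).unwrap();
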
@@ -143,6 +158,7 @@ pub enum SetupSpecialRegistersError {
 ///
 /// * `mem` - The memory that will be passed to the guest.
 /// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
+/// * `boot_prot` - The boot protocol being used.
 ///
 /// # Errors
 ///
@@ -154,14 +170,18 @@ pub enum SetupSpecialRegistersError {
 pub fn setup_sregs(
     mem: &GuestMemoryMmap,
     vcpu: &VcpuFd,
+    boot_prot: BootProtocol,
 ) -> std::result::Result<(), SetupSpecialRegistersError> {
     let mut sregs: kvm_sregs = vcpu
         .get_sregs()
         .map_err(SetupSpecialRegistersError::GetSpecialRegisters)?;

-    configure_segments_and_sregs(mem, &mut sregs)
+    configure_segments_and_sregs(mem, &mut sregs, boot_prot)
         .map_err(SetupSpecialRegistersError::ConfigureSegmentsAndSpecialRegisters)?;
-    setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?; // TODO(dgreid) - Can this be done once per system instead?
+    if let BootProtocol::LinuxBoot = boot_prot {
+        setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?;
+        // TODO(dgreid) - Can this be done once per system instead?
+    }

     vcpu.set_sregs(&sregs)
         .map_err(SetupSpecialRegistersError::SetSpecialRegisters)
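
Note: a sketch of the caller-side wiring under the new API, assuming `gm`, `vcpu`, and `entry_point` exist in the surrounding code and that the two error types are propagated with `?` into some caller error.

    // BootProtocol is Copy (the tests below copy it out of an iterator), so the
    // protocol can be read before entry_point is moved into setup_regs.
    let boot_prot = entry_point.protocol;
    setup_regs(&vcpu, entry_point)?;
    setup_sregs(&gm, &vcpu, boot_prot)?;
    // On the PvhBoot path, setup_sregs deliberately skips setup_page_tables:
    // the guest starts with paging disabled and builds its own page tables.
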
@@ -176,6 +196,7 @@ const EFER_LMA: u64 = 0x400;
 const EFER_LME: u64 = 0x100;

 const X86_CR0_PE: u64 = 0x1;
+const X86_CR0_ET: u64 = 0x10;
 const X86_CR0_PG: u64 = 0x8000_0000;
 const X86_CR4_PAE: u64 = 0x20;

@@ -199,13 +220,31 @@ fn write_idt_value(val: u64, guest_mem: &GuestMemoryMmap) -> Result<()> {
         .map_err(|_| Error::WriteIDT)
 }

-fn configure_segments_and_sregs(mem: &GuestMemoryMmap, sregs: &mut kvm_sregs) -> Result<()> {
-    let gdt_table: [u64; BOOT_GDT_MAX as usize] = [
-        gdt_entry(0, 0, 0),            // NULL
-        gdt_entry(0xa09b, 0, 0xfffff), // CODE
-        gdt_entry(0xc093, 0, 0xfffff), // DATA
-        gdt_entry(0x808b, 0, 0xfffff), // TSS
-    ];
+fn configure_segments_and_sregs(
+    mem: &GuestMemoryMmap,
+    sregs: &mut kvm_sregs,
+    boot_prot: BootProtocol,
+) -> Result<()> {
+    let gdt_table: [u64; BOOT_GDT_MAX as usize] = match boot_prot {
+        BootProtocol::PvhBoot => {
+            // Configure GDT entries as specified by PVH boot protocol
+            [
+                gdt_entry(0, 0, 0),                // NULL
+                gdt_entry(0xc09b, 0, 0xffff_ffff), // CODE
+                gdt_entry(0xc093, 0, 0xffff_ffff), // DATA
+                gdt_entry(0x008b, 0, 0x67),        // TSS
+            ]
+        }
+        BootProtocol::LinuxBoot => {
+            // Configure GDT entries as specified by Linux 64bit boot protocol
+            [
+                gdt_entry(0, 0, 0),            // NULL
+                gdt_entry(0xa09b, 0, 0xfffff), // CODE
+                gdt_entry(0xc093, 0, 0xfffff), // DATA
+                gdt_entry(0x808b, 0, 0xfffff), // TSS
+            ]
+        }
+    };

     let code_seg = kvm_segment_from_gdt(gdt_table[1], 1);
     let data_seg = kvm_segment_from_gdt(gdt_table[2], 2);
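
Note: a sketch of what the flag words above encode, assuming the usual `gdt_entry(flags, base, limit)` packing from `super::gdt` (access byte in the low byte of `flags`, the G/DB/L/AVL nibble in bits 12..16). The `GDT_F_*` constants below are named only for illustration:

    const GDT_F_L: u16 = 1 << 13;  // L: 64-bit code segment
    const GDT_F_DB: u16 = 1 << 14; // D/B: 32-bit default operand size
    const GDT_F_G: u16 = 1 << 15;  // G: 4 KiB limit granularity
    // Both protocols use access byte 0x9b for CODE: present, ring 0, executable.
    // Linux 64-bit boot: a long-mode code segment (L set, D/B clear).
    assert_eq!(0xa09b & (GDT_F_G | GDT_F_L), GDT_F_G | GDT_F_L);
    assert_eq!(0xa09b & GDT_F_DB, 0);
    // PVH boot: a 32-bit code segment (D/B set, L clear), plus a TSS entry
    // (flags 0x008b) with G clear so its limit is the exact 0x67 bytes.
    assert_eq!(0xc09b & (GDT_F_G | GDT_F_DB), GDT_F_G | GDT_F_DB);
    assert_eq!(0xc09b & GDT_F_L, 0);
    assert_eq!(0x008b & GDT_F_G, 0);
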
@@ -228,9 +267,17 @@ fn configure_segments_and_sregs(mem: &GuestMemoryMmap, sregs: &mut kvm_sregs) ->
     sregs.ss = data_seg;
     sregs.tr = tss_seg;

-    // 64-bit protected mode
-    sregs.cr0 |= X86_CR0_PE;
-    sregs.efer |= EFER_LME | EFER_LMA;
+    match boot_prot {
+        BootProtocol::PvhBoot => {
+            sregs.cr0 = X86_CR0_PE | X86_CR0_ET;
+            sregs.cr4 = 0;
+        }
+        BootProtocol::LinuxBoot => {
+            // 64-bit protected mode
+            sregs.cr0 |= X86_CR0_PE;
+            sregs.efer |= EFER_LME | EFER_LMA;
+        }
+    }

     Ok(())
 }
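
Note: the net effect of the match above, summarized as a hypothetical helper (the Linux-side paging bits themselves come from setup_page_tables, which this diff does not modify):

    fn starting_mode(boot_prot: BootProtocol) -> &'static str {
        match boot_prot {
            // CR0 = PE | ET with CR4 = 0 and EFER untouched: the PVH guest
            // starts in 32-bit protected mode with paging off and enables
            // paging/long mode on its own.
            BootProtocol::PvhBoot => "32-bit protected mode, paging disabled",
            // CR0.PE plus EFER.LME/LMA, together with the identity-mapped
            // tables from setup_page_tables: the vCPU enters the kernel
            // already in 64-bit long mode.
            BootProtocol::LinuxBoot => "64-bit long mode, paging enabled",
        }
    }
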
@@ -288,24 +335,45 @@ mod tests {
         gm.read_obj(read_addr).unwrap()
     }

-    fn validate_segments_and_sregs(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
+    fn validate_segments_and_sregs(
+        gm: &GuestMemoryMmap,
+        sregs: &kvm_sregs,
+        boot_prot: BootProtocol,
+    ) {
+        if let BootProtocol::LinuxBoot = boot_prot {
+            assert_eq!(0xaf_9b00_0000_ffff, read_u64(&gm, BOOT_GDT_OFFSET + 8));
+            assert_eq!(0xcf_9300_0000_ffff, read_u64(&gm, BOOT_GDT_OFFSET + 16));
+            assert_eq!(0x8f_8b00_0000_ffff, read_u64(&gm, BOOT_GDT_OFFSET + 24));
+
+            assert_eq!(0xffff_ffff, sregs.tr.limit);
+
+            assert!(sregs.cr0 & X86_CR0_PE != 0);
+            assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
+        } else {
+            // Validate values that are specific to PVH boot protocol
+            assert_eq!(0xcf_9b00_0000_ffff, read_u64(&gm, BOOT_GDT_OFFSET + 8));
+            assert_eq!(0xcf_9300_0000_ffff, read_u64(&gm, BOOT_GDT_OFFSET + 16));
+            assert_eq!(0x00_8b00_0000_0067, read_u64(&gm, BOOT_GDT_OFFSET + 24));
+
+            assert_eq!(0x67, sregs.tr.limit);
+            assert_eq!(0, sregs.tr.g);
+
+            assert!(sregs.cr0 & X86_CR0_PE != 0 && sregs.cr0 & X86_CR0_ET != 0);
+            assert_eq!(0, sregs.cr4);
+        }
+
+        // Common settings for both PVH and Linux boot protocol
         assert_eq!(0x0, read_u64(&gm, BOOT_GDT_OFFSET));
-        assert_eq!(0xaf_9b00_0000_ffff, read_u64(&gm, BOOT_GDT_OFFSET + 8));
-        assert_eq!(0xcf_9300_0000_ffff, read_u64(&gm, BOOT_GDT_OFFSET + 16));
-        assert_eq!(0x8f_8b00_0000_ffff, read_u64(&gm, BOOT_GDT_OFFSET + 24));
         assert_eq!(0x0, read_u64(&gm, BOOT_IDT_OFFSET));

         assert_eq!(0, sregs.cs.base);
-        assert_eq!(0xfffff, sregs.ds.limit);
+        assert_eq!(0xffff_ffff, sregs.ds.limit);
         assert_eq!(0x10, sregs.es.selector);
         assert_eq!(1, sregs.fs.present);
         assert_eq!(1, sregs.gs.g);
         assert_eq!(0, sregs.ss.avl);
         assert_eq!(0, sregs.tr.base);
-        assert_eq!(0xfffff, sregs.tr.limit);
         assert_eq!(0, sregs.tr.avl);
-        assert!(sregs.cr0 & X86_CR0_PE != 0);
-        assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
     }

     fn validate_page_tables(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
@@ -357,7 +425,12 @@ mod tests {
             ..Default::default()
         };

-        setup_regs(&vcpu, expected_regs.rip).unwrap();
+        let entry_point: EntryPoint = EntryPoint {
+            entry_addr: GuestAddress(expected_regs.rip),
+            protocol: BootProtocol::LinuxBoot,
+        };
+
+        setup_regs(&vcpu, entry_point).unwrap();

         let actual_regs: kvm_regs = vcpu.get_regs().unwrap();
         assert_eq!(actual_regs, expected_regs);
@@ -370,16 +443,22 @@ mod tests {
         let vcpu = vm.create_vcpu(0).unwrap();
         let gm = create_guest_mem(None);

-        assert!(vcpu.set_sregs(&Default::default()).is_ok());
-        setup_sregs(&gm, &vcpu).unwrap();
-
-        let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
-        // for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
-        // We set it to 1, otherwise the test will fail.
-        sregs.gs.g = 1;
-
-        validate_segments_and_sregs(&gm, &sregs);
-        validate_page_tables(&gm, &sregs);
+        [BootProtocol::LinuxBoot, BootProtocol::PvhBoot]
+            .iter()
+            .for_each(|boot_prot| {
+                assert!(vcpu.set_sregs(&Default::default()).is_ok());
+                setup_sregs(&gm, &vcpu, *boot_prot).unwrap();
+
+                let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
+                // for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
+                // We set it to 1, otherwise the test will fail.
+                sregs.gs.g = 1;
+
+                validate_segments_and_sregs(&gm, &sregs, *boot_prot);
+                if let BootProtocol::LinuxBoot = *boot_prot {
+                    validate_page_tables(&gm, &sregs);
+                }
+            });
     }

     #[test]
@@ -424,9 +503,13 @@ mod tests {
     fn test_configure_segments_and_sregs() {
         let mut sregs: kvm_sregs = Default::default();
         let gm = create_guest_mem(None);
-        configure_segments_and_sregs(&gm, &mut sregs).unwrap();
+        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::LinuxBoot).unwrap();
+
+        validate_segments_and_sregs(&gm, &sregs, BootProtocol::LinuxBoot);
+
+        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::PvhBoot).unwrap();

-        validate_segments_and_sregs(&gm, &sregs);
+        validate_segments_and_sregs(&gm, &sregs, BootProtocol::PvhBoot);
     }

     #[test]