@@ -19,38 +19,6 @@
 
 vm_vaddr_t exception_handlers;
 
-/* Virtual translation table structure declarations */
-struct pageUpperEntry {
-	uint64_t present:1;
-	uint64_t writable:1;
-	uint64_t user:1;
-	uint64_t write_through:1;
-	uint64_t cache_disable:1;
-	uint64_t accessed:1;
-	uint64_t ignored_06:1;
-	uint64_t page_size:1;
-	uint64_t ignored_11_08:4;
-	uint64_t pfn:40;
-	uint64_t ignored_62_52:11;
-	uint64_t execute_disable:1;
-};
-
-struct pageTableEntry {
-	uint64_t present:1;
-	uint64_t writable:1;
-	uint64_t user:1;
-	uint64_t write_through:1;
-	uint64_t cache_disable:1;
-	uint64_t accessed:1;
-	uint64_t dirty:1;
-	uint64_t reserved_07:1;
-	uint64_t global:1;
-	uint64_t ignored_11_09:3;
-	uint64_t pfn:40;
-	uint64_t ignored_62_52:11;
-	uint64_t execute_disable:1;
-};
-
 void regs_dump(FILE *stream, struct kvm_regs *regs,
 	       uint8_t indent)
 {
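The bit-field structs deleted above are replaced throughout by raw uint64_t entries manipulated with mask macros. Those macros live in the selftests' processor.h, outside this diff; the following is a sketch of plausible definitions, with each bit position taken from the layout the deleted structs encoded and BIT_ULL()/GENMASK_ULL() from linux/bits.h:

	/* Hypothetical reconstruction -- the real definitions are in processor.h. */
	#define PTE_PRESENT_MASK	BIT_ULL(0)
	#define PTE_WRITABLE_MASK	BIT_ULL(1)
	#define PTE_DIRTY_MASK		BIT_ULL(6)
	#define PTE_LARGE_MASK		BIT_ULL(7)	/* the old page_size bit */
	#define PTE_NX_MASK		BIT_ULL(63)	/* the old execute_disable bit */

	/* pfn:40 sat at bits 51:12, so the physical-address mask covers them. */
	#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)
	#define PTE_GET_PFN(pte)	(((pte) & PHYSICAL_PAGE_MASK) >> 12)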
@@ -195,23 +163,21 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
 	return &page_table[index];
 }
 
-static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
-						    uint64_t pt_pfn,
-						    uint64_t vaddr,
-						    uint64_t paddr,
-						    int level,
-						    enum x86_page_size page_size)
+static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
+				       uint64_t pt_pfn,
+				       uint64_t vaddr,
+				       uint64_t paddr,
+				       int level,
+				       enum x86_page_size page_size)
 {
-	struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
-
-	if (!pte->present) {
-		pte->writable = true;
-		pte->present = true;
-		pte->page_size = (level == page_size);
-		if (pte->page_size)
-			pte->pfn = paddr >> vm->page_shift;
+	uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
+
+	if (!(*pte & PTE_PRESENT_MASK)) {
+		*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
+		if (level == page_size)
+			*pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
 		else
-			pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
+			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
 	} else {
 		/*
 		 * Entry already present. Assert that the caller doesn't want
@@ -221,7 +187,7 @@ static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
 	TEST_ASSERT(level != page_size,
 		    "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
 		    page_size, vaddr);
-	TEST_ASSERT(!pte->page_size,
+	TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
 		    "Cannot create page table at level: %u, vaddr: 0x%lx\n",
 		    level, vaddr);
 	}
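With plain masks, installing a hugepage entry reduces to two ORs on one u64. A worked example of the path above, assuming the masks sketched earlier and a 2 MiB mapping (level 1) at the 2 MiB-aligned paddr 0x40200000:

	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;	/* 0x3 */
	*pte |= PTE_LARGE_MASK | (0x40200000 & PHYSICAL_PAGE_MASK);
	/* *pte == 0x40200083: present (bit 0) | writable (bit 1) | PS (bit 7)
	 * | the physical address in bits 51:12. */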
@@ -232,8 +198,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	       enum x86_page_size page_size)
 {
 	const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
-	struct pageUpperEntry *pml4e, *pdpe, *pde;
-	struct pageTableEntry *pte;
+	uint64_t *pml4e, *pdpe, *pde;
+	uint64_t *pte;
 
 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
 		    "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -257,37 +223,35 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	 */
 	pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
 				      vaddr, paddr, 3, page_size);
-	if (pml4e->page_size)
+	if (*pml4e & PTE_LARGE_MASK)
 		return;
 
-	pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
-	if (pdpe->page_size)
+	pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size);
+	if (*pdpe & PTE_LARGE_MASK)
 		return;
 
-	pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
-	if (pde->page_size)
+	pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size);
+	if (*pde & PTE_LARGE_MASK)
 		return;
 
 	/* Fill in page table entry. */
-	pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
-	TEST_ASSERT(!pte->present,
+	pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0);
+	TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
 		    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
-	pte->pfn = paddr >> vm->page_shift;
-	pte->writable = true;
-	pte->present = 1;
+	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
 }
 
 void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
 	__virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
 }
 
-static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
+static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
 					  uint64_t vaddr)
 {
 	uint16_t index[4];
-	struct pageUpperEntry *pml4e, *pdpe, *pde;
-	struct pageTableEntry *pte;
+	uint64_t *pml4e, *pdpe, *pde;
+	uint64_t *pte;
 	struct kvm_cpuid_entry2 *entry;
 	struct kvm_sregs sregs;
 	int max_phy_addr;
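Because a hugepage now announces itself via PTE_LARGE_MASK, __virt_pg_map() above can stop walking as soon as one is installed, which is what the early returns do. A usage sketch (addresses hypothetical; X86_PAGE_SIZE_2M is assumed to be the level-1 enumerator alongside the X86_PAGE_SIZE_4K shown in this hunk):

	/* Map gva 0x40000000 to gpa 0x40000000 as a single 2 MiB page; the
	 * walk returns right after the PDE gets PTE_LARGE_MASK set. */
	__virt_pg_map(vm, 0x40000000, 0x40000000, X86_PAGE_SIZE_2M);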
@@ -329,57 +293,55 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
 	index[3] = (vaddr >> 39) & 0x1ffu;
 
 	pml4e = addr_gpa2hva(vm, vm->pgd);
-	TEST_ASSERT(pml4e[index[3]].present,
+	TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
 		    "Expected pml4e to be present for gva: 0x%08lx", vaddr);
-	TEST_ASSERT((*(uint64_t *)(&pml4e[index[3]]) &
-		    (rsvd_mask | (1ull << 7))) == 0,
+	TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
 		    "Unexpected reserved bits set.");
 
-	pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
-	TEST_ASSERT(pdpe[index[2]].present,
+	pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
+	TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
 		    "Expected pdpe to be present for gva: 0x%08lx", vaddr);
-	TEST_ASSERT(pdpe[index[2]].page_size == 0,
+	TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
 		    "Expected pdpe to map a pde not a 1-GByte page.");
-	TEST_ASSERT((*(uint64_t *)(&pdpe[index[2]]) & rsvd_mask) == 0,
+	TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
 		    "Unexpected reserved bits set.");
 
-	pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
-	TEST_ASSERT(pde[index[1]].present,
+	pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
+	TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
 		    "Expected pde to be present for gva: 0x%08lx", vaddr);
-	TEST_ASSERT(pde[index[1]].page_size == 0,
+	TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
 		    "Expected pde to map a pte not a 2-MByte page.");
-	TEST_ASSERT((*(uint64_t *)(&pde[index[1]]) & rsvd_mask) == 0,
+	TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
 		    "Unexpected reserved bits set.");
 
-	pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
-	TEST_ASSERT(pte[index[0]].present,
+	pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
+	TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
 		    "Expected pte to be present for gva: 0x%08lx", vaddr);
 
 	return &pte[index[0]];
 }
 
 uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
 {
-	struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
+	uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
 
 	return *(uint64_t *)pte;
 }
 
 void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
 			     uint64_t pte)
 {
-	struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid,
-								  vaddr);
+	uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
 
 	*(uint64_t *)new_pte = pte;
 }
 
 void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
-	struct pageUpperEntry *pml4e, *pml4e_start;
-	struct pageUpperEntry *pdpe, *pdpe_start;
-	struct pageUpperEntry *pde, *pde_start;
-	struct pageTableEntry *pte, *pte_start;
+	uint64_t *pml4e, *pml4e_start;
+	uint64_t *pdpe, *pdpe_start;
+	uint64_t *pde, *pde_start;
+	uint64_t *pte, *pte_start;
 
 	if (!vm->pgd_created)
 		return;
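Since vm_get_page_table_entry() and vm_set_page_table_entry() now traffic in raw uint64_t values, a test can edit PTE bits directly with the same masks. A hypothetical snippet (vcpuid 0, gva assumed to be already mapped):

	uint64_t pte = vm_get_page_table_entry(vm, 0, gva);

	/* Revoke write access and install the modified entry. */
	vm_set_page_table_entry(vm, 0, gva, pte & ~PTE_WRITABLE_MASK);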
@@ -389,58 +351,58 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	fprintf(stream, "%*s index hvaddr gpaddr "
 		"addr w exec dirty\n",
 		indent, "");
-	pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd);
+	pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
 	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
 		pml4e = &pml4e_start[n1];
-		if (!pml4e->present)
+		if (!(*pml4e & PTE_PRESENT_MASK))
 			continue;
-		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
+		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
 			" %u\n",
 			indent, "",
 			pml4e - pml4e_start, pml4e,
-			addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn,
-			pml4e->writable, pml4e->execute_disable);
+			addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
+			!!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
 
-		pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size);
+		pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
 		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
 			pdpe = &pdpe_start[n2];
-			if (!pdpe->present)
+			if (!(*pdpe & PTE_PRESENT_MASK))
 				continue;
-			fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10lx "
+			fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
 				"%u %u\n",
 				indent, "",
 				pdpe - pdpe_start, pdpe,
 				addr_hva2gpa(vm, pdpe),
-				(uint64_t) pdpe->pfn, pdpe->writable,
-				pdpe->execute_disable);
+				PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
+				!!(*pdpe & PTE_NX_MASK));
 
-			pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size);
+			pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
 			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
 				pde = &pde_start[n3];
-				if (!pde->present)
+				if (!(*pde & PTE_PRESENT_MASK))
 					continue;
 				fprintf(stream, "%*spde 0x%-3zx %p "
-					"0x%-12lx 0x%-10lx %u %u\n",
+					"0x%-12lx 0x%-10llx %u %u\n",
 					indent, "", pde - pde_start, pde,
 					addr_hva2gpa(vm, pde),
-					(uint64_t) pde->pfn, pde->writable,
-					pde->execute_disable);
+					PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
+					!!(*pde & PTE_NX_MASK));
 
-				pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size);
+				pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
 				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
 					pte = &pte_start[n4];
-					if (!pte->present)
+					if (!(*pte & PTE_PRESENT_MASK))
 						continue;
 					fprintf(stream, "%*spte 0x%-3zx %p "
-						"0x%-12lx 0x%-10lx %u %u "
+						"0x%-12lx 0x%-10llx %u %u "
 						" %u 0x%-10lx\n",
 						indent, "",
 						pte - pte_start, pte,
 						addr_hva2gpa(vm, pte),
-						(uint64_t) pte->pfn,
-						pte->writable,
-						pte->execute_disable,
-						pte->dirty,
+						PTE_GET_PFN(*pte),
+						!!(*pte & PTE_WRITABLE_MASK),
+						!!(*pte & PTE_NX_MASK),
+						!!(*pte & PTE_DIRTY_MASK),
 						((uint64_t) n1 << 27)
 						| ((uint64_t) n2 << 18)
 						| ((uint64_t) n3 << 9)
@@ -558,8 +520,8 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
 vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
 	uint16_t index[4];
-	struct pageUpperEntry *pml4e, *pdpe, *pde;
-	struct pageTableEntry *pte;
+	uint64_t *pml4e, *pdpe, *pde;
+	uint64_t *pte;
 
 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
 		    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -572,22 +534,22 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 	if (!vm->pgd_created)
 		goto unmapped_gva;
 	pml4e = addr_gpa2hva(vm, vm->pgd);
-	if (!pml4e[index[3]].present)
+	if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
 		goto unmapped_gva;
 
-	pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
-	if (!pdpe[index[2]].present)
+	pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
+	if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
 		goto unmapped_gva;
 
-	pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
-	if (!pde[index[1]].present)
+	pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
+	if (!(pde[index[1]] & PTE_PRESENT_MASK))
 		goto unmapped_gva;
 
-	pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
-	if (!pte[index[0]].present)
+	pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
+	if (!(pte[index[0]] & PTE_PRESENT_MASK))
 		goto unmapped_gva;
 
-	return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);
+	return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & 0xfffu);
 
 unmapped_gva:
 	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
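addr_gva2gpa() is a straight software page walk: each level's table index is a 9-bit slice of the canonical 48-bit gva, and the returned gpa is the leaf PFN times the page size plus the low 12 offset bits. The full index decomposition, matching the index[3] assignment shown earlier (the lower three lines are inferred from the same pattern):

	index[0] = (gva >> 12) & 0x1ffu;	/* page-table (PTE) index */
	index[1] = (gva >> 21) & 0x1ffu;	/* page-directory (PDE) index */
	index[2] = (gva >> 30) & 0x1ffu;	/* PDPT (PDPE) index */
	index[3] = (gva >> 39) & 0x1ffu;	/* PML4 (PML4E) index */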