@@ -335,6 +335,214 @@ static void vt_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
335
335
vmx_deliver_interrupt (apic , delivery_mode , trig_mode , vector );
336
336
}
337
337
338
/* Post-CPUID-update hook: nothing to do for TD vCPUs, defer to VMX otherwise. */
static void vt_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu))
		vmx_vcpu_after_set_cpuid(vcpu);
}
345
+
346
/* Exception-bitmap update: skipped for TD vCPUs, VMX path for normal guests. */
static void vt_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu))
		vmx_update_exception_bitmap(vcpu);
}
353
+
354
+ static u64 vt_get_segment_base (struct kvm_vcpu * vcpu , int seg )
355
+ {
356
+ if (is_td_vcpu (vcpu ))
357
+ return 0 ;
358
+
359
+ return vmx_get_segment_base (vcpu , seg );
360
+ }
361
+
362
+ static void vt_get_segment (struct kvm_vcpu * vcpu , struct kvm_segment * var ,
363
+ int seg )
364
+ {
365
+ if (is_td_vcpu (vcpu )) {
366
+ memset (var , 0 , sizeof (* var ));
367
+ return ;
368
+ }
369
+
370
+ vmx_get_segment (vcpu , var , seg );
371
+ }
372
+
373
/* Segment write: ignored for TD vCPUs, forwarded to VMX for normal guests. */
static void vt_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
			   int seg)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_segment(vcpu, var, seg);
}
381
+
382
/* CPL query: a TD vCPU reports CPL 0; otherwise ask VMX. */
static int vt_get_cpl(struct kvm_vcpu *vcpu)
{
	return is_td_vcpu(vcpu) ? 0 : vmx_get_cpl(vcpu);
}
389
+
390
/* Uncached CPL query: same TD behavior as vt_get_cpl(), i.e. report 0. */
static int vt_get_cpl_no_cache(struct kvm_vcpu *vcpu)
{
	return is_td_vcpu(vcpu) ? 0 : vmx_get_cpl_no_cache(vcpu);
}
397
+
398
/* CS.DB/CS.L query: a TD vCPU reports both bits clear; VMX path otherwise. */
static void vt_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	if (!is_td_vcpu(vcpu)) {
		vmx_get_cs_db_l_bits(vcpu, db, l);
		return;
	}

	*db = 0;
	*l = 0;
}
408
+
409
+ static bool vt_is_valid_cr0 (struct kvm_vcpu * vcpu , unsigned long cr0 )
410
+ {
411
+ if (is_td_vcpu (vcpu ))
412
+ return true;
413
+
414
+ return vmx_is_valid_cr0 (vcpu , cr0 );
415
+ }
416
+
417
/* CR0 write: ignored for TD vCPUs, forwarded to VMX for normal guests. */
static void vt_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_cr0(vcpu, cr0);
}
424
+
425
+ static bool vt_is_valid_cr4 (struct kvm_vcpu * vcpu , unsigned long cr4 )
426
+ {
427
+ if (is_td_vcpu (vcpu ))
428
+ return true;
429
+
430
+ return vmx_is_valid_cr4 (vcpu , cr4 );
431
+ }
432
+
433
/* CR4 write: ignored for TD vCPUs, forwarded to VMX for normal guests. */
static void vt_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_cr4(vcpu, cr4);
}
440
+
441
+ static int vt_set_efer (struct kvm_vcpu * vcpu , u64 efer )
442
+ {
443
+ if (is_td_vcpu (vcpu ))
444
+ return 0 ;
445
+
446
+ return vmx_set_efer (vcpu , efer );
447
+ }
448
+
449
+ static void vt_get_idt (struct kvm_vcpu * vcpu , struct desc_ptr * dt )
450
+ {
451
+ if (is_td_vcpu (vcpu )) {
452
+ memset (dt , 0 , sizeof (* dt ));
453
+ return ;
454
+ }
455
+
456
+ vmx_get_idt (vcpu , dt );
457
+ }
458
+
459
/* IDT write: ignored for TD vCPUs, forwarded to VMX for normal guests. */
static void vt_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_idt(vcpu, dt);
}
466
+
467
+ static void vt_get_gdt (struct kvm_vcpu * vcpu , struct desc_ptr * dt )
468
+ {
469
+ if (is_td_vcpu (vcpu )) {
470
+ memset (dt , 0 , sizeof (* dt ));
471
+ return ;
472
+ }
473
+
474
+ vmx_get_gdt (vcpu , dt );
475
+ }
476
+
477
/* GDT write: ignored for TD vCPUs, forwarded to VMX for normal guests. */
static void vt_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_gdt(vcpu, dt);
}
484
+
485
/* DR6 write: ignored for TD vCPUs, forwarded to VMX for normal guests. */
static void vt_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_dr6(vcpu, val);
}
492
+
493
/* DR7 write: ignored for TD vCPUs, forwarded to VMX for normal guests. */
static void vt_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_dr7(vcpu, val);
}
500
+
501
/*
 * Sync guest debug registers after a MOV-DR exit.
 *
 * MOV-DR exiting is always cleared for TD guests, even in debug mode, so
 * KVM_DEBUGREG_WONT_EXIT can never be set and this hook should never be
 * reached for a TD vCPU.  Warn instead of silently returning if it ever
 * is, matching the handling in vt_cache_reg() for the same "impossible
 * for TD" contract.
 */
static void vt_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
		return;

	vmx_sync_dirty_debug_regs(vcpu);
}
513
+
514
+ static void vt_cache_reg (struct kvm_vcpu * vcpu , enum kvm_reg reg )
515
+ {
516
+ if (WARN_ON_ONCE (is_td_vcpu (vcpu )))
517
+ return ;
518
+
519
+ vmx_cache_reg (vcpu , reg );
520
+ }
521
+
522
/* RFLAGS read: a TD vCPU reports 0; otherwise ask VMX. */
static unsigned long vt_get_rflags(struct kvm_vcpu *vcpu)
{
	return is_td_vcpu(vcpu) ? 0 : vmx_get_rflags(vcpu);
}
529
+
530
/* RFLAGS write: ignored for TD vCPUs, forwarded to VMX for normal guests. */
static void vt_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_rflags(vcpu, rflags);
}
537
+
538
+ static bool vt_get_if_flag (struct kvm_vcpu * vcpu )
539
+ {
540
+ if (is_td_vcpu (vcpu ))
541
+ return false;
542
+
543
+ return vmx_get_if_flag (vcpu );
544
+ }
545
+
338
546
static void vt_flush_tlb_all (struct kvm_vcpu * vcpu )
339
547
{
340
548
if (is_td_vcpu (vcpu )) {
@@ -457,6 +665,14 @@ static void vt_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
457
665
vmx_inject_irq (vcpu , reinjected );
458
666
}
459
667
668
/* Exception injection: skipped for TD vCPUs, VMX path for normal guests. */
static void vt_inject_exception(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu))
		vmx_inject_exception(vcpu);
}
675
+
460
676
static void vt_cancel_injection (struct kvm_vcpu * vcpu )
461
677
{
462
678
if (is_td_vcpu (vcpu ))
@@ -504,6 +720,14 @@ static void vt_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
504
720
vmx_get_exit_info (vcpu , reason , info1 , info2 , intr_info , error_code );
505
721
}
506
722
723
/* CR8 intercept update: skipped for TD vCPUs, VMX path for normal guests. */
static void vt_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	if (!is_td_vcpu(vcpu))
		vmx_update_cr8_intercept(vcpu, tpr, irr);
}
730
+
507
731
static void vt_set_apic_access_page_addr (struct kvm_vcpu * vcpu )
508
732
{
509
733
if (is_td_vcpu (vcpu ))
@@ -522,6 +746,30 @@ static void vt_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
522
746
vmx_refresh_apicv_exec_ctrl (vcpu );
523
747
}
524
748
749
+ static void vt_load_eoi_exitmap (struct kvm_vcpu * vcpu , u64 * eoi_exit_bitmap )
750
+ {
751
+ if (is_td_vcpu (vcpu ))
752
+ return ;
753
+
754
+ vmx_load_eoi_exitmap (vcpu , eoi_exit_bitmap );
755
+ }
756
+
757
/* TSS address setup: reports success without doing anything for a TD VM. */
static int vt_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return is_td(kvm) ? 0 : vmx_set_tss_addr(kvm, addr);
}
764
+
765
+ static int vt_set_identity_map_addr (struct kvm * kvm , u64 ident_addr )
766
+ {
767
+ if (is_td (kvm ))
768
+ return 0 ;
769
+
770
+ return vmx_set_identity_map_addr (kvm , ident_addr );
771
+ }
772
+
525
773
static int vt_mem_enc_ioctl (struct kvm * kvm , void __user * argp )
526
774
{
527
775
if (!is_td (kvm ))
@@ -583,32 +831,33 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
583
831
.vcpu_load = vt_vcpu_load ,
584
832
.vcpu_put = vt_vcpu_put ,
585
833
586
- .update_exception_bitmap = vmx_update_exception_bitmap ,
834
+ .update_exception_bitmap = vt_update_exception_bitmap ,
587
835
.get_feature_msr = vmx_get_feature_msr ,
588
836
.get_msr = vt_get_msr ,
589
837
.set_msr = vt_set_msr ,
590
- .get_segment_base = vmx_get_segment_base ,
591
- .get_segment = vmx_get_segment ,
592
- .set_segment = vmx_set_segment ,
593
- .get_cpl = vmx_get_cpl ,
594
- .get_cpl_no_cache = vmx_get_cpl_no_cache ,
595
- .get_cs_db_l_bits = vmx_get_cs_db_l_bits ,
596
- .is_valid_cr0 = vmx_is_valid_cr0 ,
597
- .set_cr0 = vmx_set_cr0 ,
598
- .is_valid_cr4 = vmx_is_valid_cr4 ,
599
- .set_cr4 = vmx_set_cr4 ,
600
- .set_efer = vmx_set_efer ,
601
- .get_idt = vmx_get_idt ,
602
- .set_idt = vmx_set_idt ,
603
- .get_gdt = vmx_get_gdt ,
604
- .set_gdt = vmx_set_gdt ,
605
- .set_dr6 = vmx_set_dr6 ,
606
- .set_dr7 = vmx_set_dr7 ,
607
- .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs ,
608
- .cache_reg = vmx_cache_reg ,
609
- .get_rflags = vmx_get_rflags ,
610
- .set_rflags = vmx_set_rflags ,
611
- .get_if_flag = vmx_get_if_flag ,
838
+
839
+ .get_segment_base = vt_get_segment_base ,
840
+ .get_segment = vt_get_segment ,
841
+ .set_segment = vt_set_segment ,
842
+ .get_cpl = vt_get_cpl ,
843
+ .get_cpl_no_cache = vt_get_cpl_no_cache ,
844
+ .get_cs_db_l_bits = vt_get_cs_db_l_bits ,
845
+ .is_valid_cr0 = vt_is_valid_cr0 ,
846
+ .set_cr0 = vt_set_cr0 ,
847
+ .is_valid_cr4 = vt_is_valid_cr4 ,
848
+ .set_cr4 = vt_set_cr4 ,
849
+ .set_efer = vt_set_efer ,
850
+ .get_idt = vt_get_idt ,
851
+ .set_idt = vt_set_idt ,
852
+ .get_gdt = vt_get_gdt ,
853
+ .set_gdt = vt_set_gdt ,
854
+ .set_dr6 = vt_set_dr6 ,
855
+ .set_dr7 = vt_set_dr7 ,
856
+ .sync_dirty_debug_regs = vt_sync_dirty_debug_regs ,
857
+ .cache_reg = vt_cache_reg ,
858
+ .get_rflags = vt_get_rflags ,
859
+ .set_rflags = vt_set_rflags ,
860
+ .get_if_flag = vt_get_if_flag ,
612
861
613
862
.flush_tlb_all = vt_flush_tlb_all ,
614
863
.flush_tlb_current = vt_flush_tlb_current ,
@@ -625,36 +874,36 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
625
874
.patch_hypercall = vmx_patch_hypercall ,
626
875
.inject_irq = vt_inject_irq ,
627
876
.inject_nmi = vt_inject_nmi ,
628
- .inject_exception = vmx_inject_exception ,
877
+ .inject_exception = vt_inject_exception ,
629
878
.cancel_injection = vt_cancel_injection ,
630
879
.interrupt_allowed = vt_interrupt_allowed ,
631
880
.nmi_allowed = vt_nmi_allowed ,
632
881
.get_nmi_mask = vt_get_nmi_mask ,
633
882
.set_nmi_mask = vt_set_nmi_mask ,
634
883
.enable_nmi_window = vt_enable_nmi_window ,
635
884
.enable_irq_window = vt_enable_irq_window ,
636
- .update_cr8_intercept = vmx_update_cr8_intercept ,
885
+ .update_cr8_intercept = vt_update_cr8_intercept ,
637
886
638
887
.x2apic_icr_is_split = false,
639
888
.set_virtual_apic_mode = vt_set_virtual_apic_mode ,
640
889
.set_apic_access_page_addr = vt_set_apic_access_page_addr ,
641
890
.refresh_apicv_exec_ctrl = vt_refresh_apicv_exec_ctrl ,
642
- .load_eoi_exitmap = vmx_load_eoi_exitmap ,
891
+ .load_eoi_exitmap = vt_load_eoi_exitmap ,
643
892
.apicv_pre_state_restore = vt_apicv_pre_state_restore ,
644
893
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS ,
645
894
.hwapic_isr_update = vt_hwapic_isr_update ,
646
895
.sync_pir_to_irr = vt_sync_pir_to_irr ,
647
896
.deliver_interrupt = vt_deliver_interrupt ,
648
897
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt ,
649
898
650
- .set_tss_addr = vmx_set_tss_addr ,
651
- .set_identity_map_addr = vmx_set_identity_map_addr ,
899
+ .set_tss_addr = vt_set_tss_addr ,
900
+ .set_identity_map_addr = vt_set_identity_map_addr ,
652
901
.get_mt_mask = vmx_get_mt_mask ,
653
902
654
903
.get_exit_info = vt_get_exit_info ,
655
904
.get_entry_info = vt_get_entry_info ,
656
905
657
- .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid ,
906
+ .vcpu_after_set_cpuid = vt_vcpu_after_set_cpuid ,
658
907
659
908
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit ,
660
909
0 commit comments