@@ -3071,13 +3071,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 
 	vmx->rmode.vm86_active = 1;
 
-	/*
-	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
-	 * vcpu. Warn the user that an update is overdue.
-	 */
-	if (!kvm_vmx->tss_addr)
-		pr_warn_once("KVM_SET_TSS_ADDR needs to be called before running vCPU\n");
-
 	vmx_segment_cache_clear(vmx);
 
 	vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
@@ -3350,7 +3343,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
-static int vmx_get_max_tdp_level(void)
+static int vmx_get_max_ept_level(void)
 {
 	if (cpu_has_vmx_ept_5levels())
 		return 5;
@@ -6796,8 +6789,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
 	vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
 	read_unlock(&vcpu->kvm->mmu_lock);
 
-	vmx_flush_tlb_current(vcpu);
-
+	/*
+	 * No need for a manual TLB flush at this point, KVM has already done a
+	 * flush if there were SPTEs pointing at the previous page.
+	 */
 out:
 	/*
 	 * Do not pin apic access page in memory, the MMU notifier
@@ -7243,13 +7238,20 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 				    flags);
 
 	vcpu->arch.cr2 = native_read_cr2();
+	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
+
+	vmx->idt_vectoring_info = 0;
 
 	vmx_enable_fb_clear(vmx);
 
-	if (unlikely(vmx->fail))
+	if (unlikely(vmx->fail)) {
 		vmx->exit_reason.full = 0xdead;
-	else
-		vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+		goto out;
+	}
+
+	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+	if (likely(!vmx->exit_reason.failed_vmentry))
+		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
 	if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
 	    is_nmi(vmx_get_intr_info(vcpu))) {
@@ -7258,6 +7260,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 		kvm_after_interrupt(vcpu);
 	}
 
+out:
 	guest_state_exit_irqoff();
 }
 
@@ -7379,8 +7382,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	loadsegment(es, __USER_DS);
 #endif
 
-	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
-
 	pt_guest_exit(vmx);
 
 	kvm_load_host_xsave_state(vcpu);
@@ -7397,17 +7398,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmx->nested.nested_run_pending = 0;
 	}
 
-	vmx->idt_vectoring_info = 0;
-
 	if (unlikely(vmx->fail))
 		return EXIT_FASTPATH_NONE;
 
 	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
 		kvm_machine_check();
 
-	if (likely(!vmx->exit_reason.failed_vmentry))
-		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
-
 	trace_kvm_exit(vcpu, KVM_ISA_VMX);
 
 	if (unlikely(vmx->exit_reason.failed_vmentry))
@@ -8526,7 +8522,7 @@ static __init int hardware_setup(void)
 	 */
 	vmx_setup_me_spte_mask();
 
-	kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
+	kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(),
 			  ept_caps_to_lpage_level(vmx_capability.ept));
 
 	/*