@@ -161,7 +161,7 @@ module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
 
 /*
  * List of MSRs that can be directly passed to the guest.
- * In addition to these x2apic and PT MSRs are handled specially.
+ * In addition to these x2apic, PT and LBR MSRs are handled specially.
  */
 static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
 	MSR_IA32_SPEC_CTRL,
@@ -669,25 +669,14 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
 	return flexpriority_enabled && lapic_in_kernel(vcpu);
 }
 
-static int possible_passthrough_msr_slot(u32 msr)
+static int vmx_get_passthrough_msr_slot(u32 msr)
 {
-	u32 i;
-
-	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
-		if (vmx_possible_passthrough_msrs[i] == msr)
-			return i;
-
-	return -ENOENT;
-}
-
-static bool is_valid_passthrough_msr(u32 msr)
-{
-	bool r;
+	int i;
 
 	switch (msr) {
 	case 0x800 ... 0x8ff:
 		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
-		return true;
+		return -ENOENT;
 	case MSR_IA32_RTIT_STATUS:
 	case MSR_IA32_RTIT_OUTPUT_BASE:
 	case MSR_IA32_RTIT_OUTPUT_MASK:
@@ -702,14 +691,16 @@ static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
-		return true;
+		return -ENOENT;
 	}
 
-	r = possible_passthrough_msr_slot(msr) != -ENOENT;
-
-	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
+		if (vmx_possible_passthrough_msrs[i] == msr)
+			return i;
+	}
 
-	return r;
+	WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+	return -ENOENT;
 }
 
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
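The two hunks above fold the old possible_passthrough_msr_slot()/is_valid_passthrough_msr() pair into a single vmx_get_passthrough_msr_slot() that returns either a slot index or -ENOENT. A minimal, self-contained userspace sketch of the same combined "check and get" pattern, using a hypothetical MSR table and an illustrative special-case range rather than the kernel code:

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for vmx_possible_passthrough_msrs[]. */
static const unsigned int passthrough_msrs[] = { 0x48, 0xd9, 0x600 };

/*
 * Combined "check and get": MSRs handled by dedicated code (here an
 * illustrative 0x800-0x8ff range) and unknown MSRs both map to -ENOENT;
 * everything else yields its slot in the table.
 */
static int get_passthrough_slot(unsigned int msr)
{
	int i;

	if (msr >= 0x800 && msr <= 0x8ff)
		return -ENOENT;

	for (i = 0; i < (int)ARRAY_SIZE(passthrough_msrs); i++) {
		if (passthrough_msrs[i] == msr)
			return i;
	}

	return -ENOENT;
}

int main(void)
{
	printf("0xd9  -> %d\n", get_passthrough_slot(0xd9));	/* slot 1 */
	printf("0x800 -> %d\n", get_passthrough_slot(0x800));	/* -ENOENT */
	return 0;
}

In the kernel version the WARN() continues to fire only for MSRs that are neither handled specially nor present in vmx_possible_passthrough_msrs[].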
@@ -3963,6 +3954,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	int idx;
 
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -3972,16 +3964,13 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	/*
 	 * Mark the desired intercept state in shadow bitmap, this is needed
 	 * for resync when the MSR filters change.
-	 */
-	if (is_valid_passthrough_msr(msr)) {
-		int idx = possible_passthrough_msr_slot(msr);
-
-		if (idx != -ENOENT) {
-			if (type & MSR_TYPE_R)
-				clear_bit(idx, vmx->shadow_msr_intercept.read);
-			if (type & MSR_TYPE_W)
-				clear_bit(idx, vmx->shadow_msr_intercept.write);
-		}
+	 */
+	idx = vmx_get_passthrough_msr_slot(msr);
+	if (idx >= 0) {
+		if (type & MSR_TYPE_R)
+			clear_bit(idx, vmx->shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			clear_bit(idx, vmx->shadow_msr_intercept.write);
 	}
 
 	if ((type & MSR_TYPE_R) &&
@@ -4007,6 +3996,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	int idx;
 
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -4016,16 +4006,13 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	/*
	 * Mark the desired intercept state in shadow bitmap, this is needed
 	 * for resync when the MSR filter changes.
-	 */
-	if (is_valid_passthrough_msr(msr)) {
-		int idx = possible_passthrough_msr_slot(msr);
-
-		if (idx != -ENOENT) {
-			if (type & MSR_TYPE_R)
-				set_bit(idx, vmx->shadow_msr_intercept.read);
-			if (type & MSR_TYPE_W)
-				set_bit(idx, vmx->shadow_msr_intercept.write);
-		}
+	 */
+	idx = vmx_get_passthrough_msr_slot(msr);
+	if (idx >= 0) {
+		if (type & MSR_TYPE_R)
+			set_bit(idx, vmx->shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			set_bit(idx, vmx->shadow_msr_intercept.write);
 	}
 
 	if (type & MSR_TYPE_R)
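With the single lookup, both vmx_disable_intercept_for_msr() and vmx_enable_intercept_for_msr() simply branch on idx >= 0 instead of first validating the MSR and then re-scanning the table for its slot; the shadow-bitmap bookkeeping itself is unchanged. A small sketch of the new caller shape, with hypothetical bitmap helpers standing in for set_bit()/clear_bit():

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

#define TYPE_R	1
#define TYPE_W	2

/* Hypothetical shadow bitmaps, one bit per passthrough MSR slot. */
static unsigned long shadow_read[1];
static unsigned long shadow_write[1];

static void set_bit_ul(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Stand-in for vmx_get_passthrough_msr_slot(): slot index or -ENOENT. */
static int get_passthrough_slot(unsigned int msr)
{
	return msr == 0x48 ? 0 : -ENOENT;
}

/* Mirrors the caller shape in vmx_enable_intercept_for_msr(). */
static void mark_intercepted(unsigned int msr, int type)
{
	int idx = get_passthrough_slot(msr);

	/* Any negative return means there is nothing to track. */
	if (idx >= 0) {
		if (type & TYPE_R)
			set_bit_ul(idx, shadow_read);
		if (type & TYPE_W)
			set_bit_ul(idx, shadow_write);
	}
}

int main(void)
{
	mark_intercepted(0x48, TYPE_R | TYPE_W);
	printf("read=%#lx write=%#lx\n", shadow_read[0], shadow_write[0]);
	return 0;
}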
@@ -4136,6 +4123,9 @@ static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 i;
 
+	if (!cpu_has_vmx_msr_bitmap())
+		return;
+
 	/*
 	 * Redo intercept permissions for MSRs that KVM is passing through to
 	 * the guest. Disabling interception will check the new MSR filter and
@@ -6539,7 +6529,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
 		vcpu->run->internal.data[0] = vectoring_info;
 		vcpu->run->internal.data[1] = exit_reason.full;
-		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+		vcpu->run->internal.data[2] = vmx_get_exit_qual(vcpu);
 		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
 			vcpu->run->internal.data[ndata++] =
 				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
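The final hunk reports the exit qualification to userspace via vmx_get_exit_qual(vcpu) instead of reading vcpu->arch.exit_qualification directly. Assuming the accessor follows the usual read-once-and-cache pattern for VMCS exit information, the idea looks roughly like this sketch (hypothetical names and values, not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-vCPU state for the illustration. */
struct vcpu {
	bool exit_qual_valid;
	unsigned long exit_qual;
};

/* Stand-in for a (notionally expensive) VMCS field read. */
static unsigned long vmcs_read_exit_qualification(void)
{
	return 0x184;	/* arbitrary example value */
}

/*
 * Read the exit qualification at most once per exit and cache it, so
 * every consumer sees the same value without extra VMCS accesses.
 */
static unsigned long get_exit_qual(struct vcpu *vcpu)
{
	if (!vcpu->exit_qual_valid) {
		vcpu->exit_qual = vmcs_read_exit_qualification();
		vcpu->exit_qual_valid = true;
	}
	return vcpu->exit_qual;
}

int main(void)
{
	struct vcpu vcpu = { 0 };

	printf("exit qual = %#lx\n", get_exit_qual(&vcpu));
	printf("exit qual = %#lx\n", get_exit_qual(&vcpu));	/* cached */
	return 0;
}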