@@ -658,25 +658,14 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
 	return flexpriority_enabled && lapic_in_kernel(vcpu);
 }
 
-static int possible_passthrough_msr_slot(u32 msr)
+static int vmx_get_passthrough_msr_slot(u32 msr)
 {
-	u32 i;
-
-	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
-		if (vmx_possible_passthrough_msrs[i] == msr)
-			return i;
-
-	return -ENOENT;
-}
-
-static bool is_valid_passthrough_msr(u32 msr)
-{
-	bool r;
+	int i;
 
 	switch (msr) {
 	case 0x800 ... 0x8ff:
 		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
-		return true;
+		return -ENOENT;
 	case MSR_IA32_RTIT_STATUS:
 	case MSR_IA32_RTIT_OUTPUT_BASE:
 	case MSR_IA32_RTIT_OUTPUT_MASK:
@@ -691,14 +680,16 @@ static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
-		return true;
+		return -ENOENT;
 	}
 
-	r = possible_passthrough_msr_slot(msr) != -ENOENT;
-
-	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
+		if (vmx_possible_passthrough_msrs[i] == msr)
+			return i;
+	}
 
-	return r;
+	WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+	return -ENOENT;
 }
 
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
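
Note: the two hunks above fold the old possible_passthrough_msr_slot()/is_valid_passthrough_msr() pair into a single lookup whose return value encodes both answers: a slot index (>= 0) for MSRs tracked in vmx_possible_passthrough_msrs[], and -ENOENT for MSRs handled elsewhere (x2APIC, PT, LBR) or unknown, with the WARN now reserved for the truly unknown case. A minimal userspace sketch of the same return-value convention; msr_table[], TABLE_SIZE(), and get_msr_slot() are hypothetical stand-ins for the kernel names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE(a) (sizeof(a) / sizeof((a)[0]))	/* stand-in for ARRAY_SIZE() */

/* Hypothetical stand-in for vmx_possible_passthrough_msrs[]. */
static const uint32_t msr_table[] = { 0x10, 0x1b, 0xc0000080 };

/*
 * Combined "check" and "get": one return value encodes both
 * "this MSR has no slot" (-ENOENT) and "its slot is i" (i >= 0).
 */
static int get_msr_slot(uint32_t msr)
{
	size_t i;

	for (i = 0; i < TABLE_SIZE(msr_table); i++) {
		if (msr_table[i] == msr)
			return (int)i;
	}
	return -ENOENT;
}

int main(void)
{
	int idx = get_msr_slot(0x1b);

	if (idx >= 0)	/* callers test >= 0 instead of a separate validity check */
		printf("slot %d\n", idx);
	return 0;
}

Callers thus need a single idx >= 0 test instead of the old is_valid_passthrough_msr() call followed by an != -ENOENT comparison.
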
@@ -3954,6 +3945,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	int idx;
 
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -3963,16 +3955,13 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	/*
 	 * Mark the desired intercept state in shadow bitmap, this is needed
 	 * for resync when the MSR filters change.
-	 */
-	if (is_valid_passthrough_msr(msr)) {
-		int idx = possible_passthrough_msr_slot(msr);
-
-		if (idx != -ENOENT) {
-			if (type & MSR_TYPE_R)
-				clear_bit(idx, vmx->shadow_msr_intercept.read);
-			if (type & MSR_TYPE_W)
-				clear_bit(idx, vmx->shadow_msr_intercept.write);
-		}
+	 */
+	idx = vmx_get_passthrough_msr_slot(msr);
+	if (idx >= 0) {
+		if (type & MSR_TYPE_R)
+			clear_bit(idx, vmx->shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			clear_bit(idx, vmx->shadow_msr_intercept.write);
 	}
 
 	if ((type & MSR_TYPE_R) &&
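
For illustration, a rough userspace model of the shadow-bitmap bookkeeping in the hunk above, assuming a single unsigned long per bitmap and open-coded bit operations in place of the kernel's clear_bit(); mark_passthrough(), shadow_read, and shadow_write are hypothetical names:

#include <limits.h>
#include <stdio.h>

#define MSR_TYPE_R 1
#define MSR_TYPE_W 2

/* Hypothetical shadow state: one read bit and one write bit per slot. */
static unsigned long shadow_read;
static unsigned long shadow_write;

static void mark_passthrough(int idx, int type)
{
	/* Mirrors the diff: only touch the shadow for a known slot (idx >= 0). */
	if (idx < 0 || idx >= (int)(sizeof(unsigned long) * CHAR_BIT))
		return;
	if (type & MSR_TYPE_R)
		shadow_read &= ~(1UL << idx);	/* analogous to clear_bit() */
	if (type & MSR_TYPE_W)
		shadow_write &= ~(1UL << idx);
}

int main(void)
{
	shadow_read = shadow_write = ~0UL;	/* start fully intercepted */
	mark_passthrough(3, MSR_TYPE_R | MSR_TYPE_W);
	printf("read=%lx write=%lx\n", shadow_read, shadow_write);
	return 0;
}

The shadow copy exists so the desired intercept state can be reapplied wholesale when the MSR filters change, as the comment in the hunk notes.
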
@@ -3998,6 +3987,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	int idx;
 
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -4007,16 +3997,13 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	/*
 	 * Mark the desired intercept state in shadow bitmap, this is needed
 	 * for resync when the MSR filter changes.
-	 */
-	if (is_valid_passthrough_msr(msr)) {
-		int idx = possible_passthrough_msr_slot(msr);
-
-		if (idx != -ENOENT) {
-			if (type & MSR_TYPE_R)
-				set_bit(idx, vmx->shadow_msr_intercept.read);
-			if (type & MSR_TYPE_W)
-				set_bit(idx, vmx->shadow_msr_intercept.write);
-		}
+	 */
+	idx = vmx_get_passthrough_msr_slot(msr);
+	if (idx >= 0) {
+		if (type & MSR_TYPE_R)
+			set_bit(idx, vmx->shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			set_bit(idx, vmx->shadow_msr_intercept.write);
 	}
 
 	if (type & MSR_TYPE_R)
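
The enable path above is the mirror image of the disable path: re-enabling interception sets the same shadow bits that vmx_disable_intercept_for_msr() clears. Continuing the hypothetical sketch from earlier (reusing its definitions), the symmetric helper would look like:

/* Mirror of mark_passthrough(): record that the MSR is intercepted again. */
static void mark_intercepted(int idx, int type)
{
	if (idx < 0 || idx >= (int)(sizeof(unsigned long) * CHAR_BIT))
		return;
	if (type & MSR_TYPE_R)
		shadow_read |= 1UL << idx;	/* analogous to set_bit() */
	if (type & MSR_TYPE_W)
		shadow_write |= 1UL << idx;
}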