Skip to content

Commit 259720c

Browse files
committed
KVM: VMX: Combine "check" and "get" APIs for passthrough MSR lookups
Combine possible_passthrough_msr_slot() and is_valid_passthrough_msr() into a single function, vmx_get_passthrough_msr_slot(), and have the combined helper return the slot on success, using a negative value to indicate "failure". Combining the operations avoids iterating over the array of passthrough MSRs twice for relevant MSRs.

Suggested-by: Dongli Zhang <dongli.zhang@oracle.com>
Reviewed-by: Dongli Zhang <dongli.zhang@oracle.com>
Link: https://lore.kernel.org/r/20240223202104.3330974-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent bab2204 commit 259720c

File tree

1 file changed

+26
-39
lines changed

1 file changed

+26
-39
lines changed

arch/x86/kvm/vmx/vmx.c

Lines changed: 26 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -658,25 +658,14 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
658658
return flexpriority_enabled && lapic_in_kernel(vcpu);
659659
}
660660

661-
static int possible_passthrough_msr_slot(u32 msr)
661+
static int vmx_get_passthrough_msr_slot(u32 msr)
662662
{
663-
u32 i;
664-
665-
for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
666-
if (vmx_possible_passthrough_msrs[i] == msr)
667-
return i;
668-
669-
return -ENOENT;
670-
}
671-
672-
static bool is_valid_passthrough_msr(u32 msr)
673-
{
674-
bool r;
663+
int i;
675664

676665
switch (msr) {
677666
case 0x800 ... 0x8ff:
678667
/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
679-
return true;
668+
return -ENOENT;
680669
case MSR_IA32_RTIT_STATUS:
681670
case MSR_IA32_RTIT_OUTPUT_BASE:
682671
case MSR_IA32_RTIT_OUTPUT_MASK:
@@ -691,14 +680,16 @@ static bool is_valid_passthrough_msr(u32 msr)
691680
case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
692681
case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
693682
/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
694-
return true;
683+
return -ENOENT;
695684
}
696685

697-
r = possible_passthrough_msr_slot(msr) != -ENOENT;
698-
699-
WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
686+
for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
687+
if (vmx_possible_passthrough_msrs[i] == msr)
688+
return i;
689+
}
700690

701-
return r;
691+
WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
692+
return -ENOENT;
702693
}
703694

704695
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
@@ -3954,6 +3945,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
39543945
{
39553946
struct vcpu_vmx *vmx = to_vmx(vcpu);
39563947
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3948+
int idx;
39573949

39583950
if (!cpu_has_vmx_msr_bitmap())
39593951
return;
@@ -3963,16 +3955,13 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
39633955
/*
39643956
* Mark the desired intercept state in shadow bitmap, this is needed
39653957
* for resync when the MSR filters change.
3966-
*/
3967-
if (is_valid_passthrough_msr(msr)) {
3968-
int idx = possible_passthrough_msr_slot(msr);
3969-
3970-
if (idx != -ENOENT) {
3971-
if (type & MSR_TYPE_R)
3972-
clear_bit(idx, vmx->shadow_msr_intercept.read);
3973-
if (type & MSR_TYPE_W)
3974-
clear_bit(idx, vmx->shadow_msr_intercept.write);
3975-
}
3958+
*/
3959+
idx = vmx_get_passthrough_msr_slot(msr);
3960+
if (idx >= 0) {
3961+
if (type & MSR_TYPE_R)
3962+
clear_bit(idx, vmx->shadow_msr_intercept.read);
3963+
if (type & MSR_TYPE_W)
3964+
clear_bit(idx, vmx->shadow_msr_intercept.write);
39763965
}
39773966

39783967
if ((type & MSR_TYPE_R) &&
@@ -3998,6 +3987,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
39983987
{
39993988
struct vcpu_vmx *vmx = to_vmx(vcpu);
40003989
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3990+
int idx;
40013991

40023992
if (!cpu_has_vmx_msr_bitmap())
40033993
return;
@@ -4007,16 +3997,13 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
40073997
/*
40083998
* Mark the desired intercept state in shadow bitmap, this is needed
40093999
* for resync when the MSR filter changes.
4010-
*/
4011-
if (is_valid_passthrough_msr(msr)) {
4012-
int idx = possible_passthrough_msr_slot(msr);
4013-
4014-
if (idx != -ENOENT) {
4015-
if (type & MSR_TYPE_R)
4016-
set_bit(idx, vmx->shadow_msr_intercept.read);
4017-
if (type & MSR_TYPE_W)
4018-
set_bit(idx, vmx->shadow_msr_intercept.write);
4019-
}
4000+
*/
4001+
idx = vmx_get_passthrough_msr_slot(msr);
4002+
if (idx >= 0) {
4003+
if (type & MSR_TYPE_R)
4004+
set_bit(idx, vmx->shadow_msr_intercept.read);
4005+
if (type & MSR_TYPE_W)
4006+
set_bit(idx, vmx->shadow_msr_intercept.write);
40204007
}
40214008

40224009
if (type & MSR_TYPE_R)

0 commit comments

Comments (0)