Skip to content

Commit e183d17

Browse files
committed
KVM: nSVM: Use KVM-governed feature framework to track "LBRv enabled"
Track "LBR virtualization exposed to L1" via a governed feature flag instead of using a dedicated bit/flag in vcpu_svm. Note, checking KVM's capabilities instead of the "lbrv" param means that the code isn't strictly equivalent, as lbrv_enabled could have been set if nested=false, whereas the governed feature cannot. But that's a glorified nop, as the feature/flag is consumed only by paths that are gated by nSVM being enabled. Link: https://lore.kernel.org/r/20230815203653.519297-12-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 4d2a156 commit e183d17

File tree

4 files changed

+16
-14
lines changed

4 files changed

+16
-14
lines changed

arch/x86/kvm/governed_features.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ KVM_GOVERNED_X86_FEATURE(VMX)
1111
KVM_GOVERNED_X86_FEATURE(NRIPS)
1212
KVM_GOVERNED_X86_FEATURE(TSCRATEMSR)
1313
KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD)
14+
KVM_GOVERNED_X86_FEATURE(LBRV)
1415

1516
#undef KVM_GOVERNED_X86_FEATURE
1617
#undef KVM_GOVERNED_FEATURE

arch/x86/kvm/svm/nested.c

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -552,6 +552,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
552552
bool new_vmcb12 = false;
553553
struct vmcb *vmcb01 = svm->vmcb01.ptr;
554554
struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
555+
struct kvm_vcpu *vcpu = &svm->vcpu;
555556

556557
nested_vmcb02_compute_g_pat(svm);
557558

@@ -577,18 +578,18 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
577578
vmcb_mark_dirty(vmcb02, VMCB_DT);
578579
}
579580

580-
kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
581+
kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
581582

582-
svm_set_efer(&svm->vcpu, svm->nested.save.efer);
583+
svm_set_efer(vcpu, svm->nested.save.efer);
583584

584-
svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
585-
svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
585+
svm_set_cr0(vcpu, svm->nested.save.cr0);
586+
svm_set_cr4(vcpu, svm->nested.save.cr4);
586587

587588
svm->vcpu.arch.cr2 = vmcb12->save.cr2;
588589

589-
kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
590-
kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
591-
kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
590+
kvm_rax_write(vcpu, vmcb12->save.rax);
591+
kvm_rsp_write(vcpu, vmcb12->save.rsp);
592+
kvm_rip_write(vcpu, vmcb12->save.rip);
592593

593594
/* In case we don't even reach vcpu_run, the fields are not updated */
594595
vmcb02->save.rax = vmcb12->save.rax;
@@ -602,7 +603,8 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
602603
vmcb_mark_dirty(vmcb02, VMCB_DR);
603604
}
604605

605-
if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
606+
if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
607+
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
606608
/*
607609
* Reserved bits of DEBUGCTL are ignored. Be consistent with
608610
* svm_set_msr's definition of reserved bits.
@@ -734,7 +736,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
734736

735737
vmcb02->control.virt_ext = vmcb01->control.virt_ext &
736738
LBR_CTL_ENABLE_MASK;
737-
if (svm->lbrv_enabled)
739+
if (guest_can_use(vcpu, X86_FEATURE_LBRV))
738740
vmcb02->control.virt_ext |=
739741
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
740742

@@ -1065,7 +1067,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
10651067
if (!nested_exit_on_intr(svm))
10661068
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
10671069

1068-
if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1070+
if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1071+
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
10691072
svm_copy_lbrs(vmcb12, vmcb02);
10701073
svm_update_lbrv(vcpu);
10711074
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {

arch/x86/kvm/svm/svm.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1024,7 +1024,7 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu)
10241024
bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
10251025
LBR_CTL_ENABLE_MASK);
10261026

1027-
if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
1027+
if (unlikely(is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV)))
10281028
if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
10291029
enable_lbrv = true;
10301030

@@ -4261,8 +4261,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
42614261

42624262
kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
42634263
kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
4264-
4265-
svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
4264+
kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);
42664265

42674266
/*
42684267
* Intercept VMLOAD if the vCPU mode is Intel in order to emulate that

arch/x86/kvm/svm/svm.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,6 @@ struct vcpu_svm {
259259
bool soft_int_injected;
260260

261261
/* optional nested SVM features that are enabled for this guest */
262-
bool lbrv_enabled : 1;
263262
bool pause_filter_enabled : 1;
264263
bool pause_threshold_enabled : 1;
265264
bool vgif_enabled : 1;

0 commit comments

Comments
 (0)