Commit 4c08e73

KVM: SVM: Take and hold ir_list_lock when updating vCPU's Physical ID entry
Hoist the acquisition of ir_list_lock from avic_update_iommu_vcpu_affinity() to its two callers, avic_vcpu_load() and avic_vcpu_put(), specifically to encapsulate the write to the vCPU's entry in the AVIC Physical ID table. This will allow a future fix to pull information from the Physical ID entry when updating the IRTE, without potentially consuming stale information, i.e. without racing with the vCPU being (un)loaded.

Add a comment to call out that ir_list_lock does NOT protect against multiple writers, specifically that reading the Physical ID entry in avic_vcpu_put() outside of the lock is safe.

To preserve some semblance of independence from ir_list_lock, keep the READ_ONCE() in avic_vcpu_load() even though acquiring the spinlock effectively ensures the load(s) will be generated after acquiring the lock.

Cc: stable@vger.kernel.org
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Link: https://lore.kernel.org/r/20230808233132.2499764-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent a85cd52 commit 4c08e73

File tree

1 file changed: +23 -8 lines


arch/x86/kvm/svm/avic.c

Lines changed: 23 additions & 8 deletions
@@ -986,37 +986,35 @@ static inline int
 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
 {
         int ret = 0;
-        unsigned long flags;
         struct amd_svm_iommu_ir *ir;
         struct vcpu_svm *svm = to_svm(vcpu);
 
+        lockdep_assert_held(&svm->ir_list_lock);
+
         if (!kvm_arch_has_assigned_device(vcpu->kvm))
                 return 0;
 
         /*
          * Here, we go through the per-vcpu ir_list to update all existing
          * interrupt remapping table entry targeting this vcpu.
          */
-        spin_lock_irqsave(&svm->ir_list_lock, flags);
-
         if (list_empty(&svm->ir_list))
-                goto out;
+                return 0;
 
         list_for_each_entry(ir, &svm->ir_list, node) {
                 ret = amd_iommu_update_ga(cpu, r, ir->data);
                 if (ret)
-                        break;
+                        return ret;
         }
-out:
-        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-        return ret;
+        return 0;
 }
 
 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
         u64 entry;
         int h_physical_id = kvm_cpu_get_apicid(cpu);
         struct vcpu_svm *svm = to_svm(vcpu);
+        unsigned long flags;
 
         lockdep_assert_preemption_disabled();
 
@@ -1033,6 +1031,8 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         if (kvm_vcpu_is_blocking(vcpu))
                 return;
 
+        spin_lock_irqsave(&svm->ir_list_lock, flags);
+
         entry = READ_ONCE(*(svm->avic_physical_id_cache));
         WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
 
@@ -1042,25 +1042,40 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
         avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
+
+        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
 void avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
         u64 entry;
         struct vcpu_svm *svm = to_svm(vcpu);
+        unsigned long flags;
 
         lockdep_assert_preemption_disabled();
 
+        /*
+         * Note, reading the Physical ID entry outside of ir_list_lock is safe
+         * as only the pCPU that has loaded (or is loading) the vCPU is allowed
+         * to modify the entry, and preemption is disabled.  I.e. the vCPU
+         * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
+         * recursively.
+         */
         entry = READ_ONCE(*(svm->avic_physical_id_cache));
 
         /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
         if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
                 return;
 
+        spin_lock_irqsave(&svm->ir_list_lock, flags);
+
         avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
 
         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+
+        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+
 }
 
 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
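
For readers less familiar with the locking idiom, here is a minimal userspace C sketch of the pattern the commit message describes: the helper that walks the IR list assumes its caller holds the lock (mirroring the new lockdep_assert_held()), and the caller takes the lock around both the Physical ID entry update and the IRTE walk so the walk never consumes a stale entry. Everything below (struct vcpu_sim, update_irtes, vcpu_load_sim) is an illustrative stand-in, not KVM code.

#include <pthread.h>
#include <stdio.h>

struct vcpu_sim {
        pthread_mutex_t ir_list_lock;    /* stands in for svm->ir_list_lock */
        unsigned long physical_id_entry; /* stands in for the Physical ID entry */
        int nr_irtes;                    /* stands in for the per-vCPU ir_list */
};

/* Caller must hold ir_list_lock, mirroring the kernel's lockdep_assert_held(). */
static int update_irtes(struct vcpu_sim *v, int cpu, int running)
{
        if (!v->nr_irtes)
                return 0;                /* nothing to remap */

        printf("remapping %d IRTEs to cpu %d (running=%d)\n",
               v->nr_irtes, cpu, running);
        return 0;
}

static void vcpu_load_sim(struct vcpu_sim *v, int cpu)
{
        /*
         * Take the lock around both the entry update and the IRTE walk, so
         * the walk observes the just-written entry, not a stale one.
         */
        pthread_mutex_lock(&v->ir_list_lock);

        v->physical_id_entry = (unsigned long)cpu | 1UL;  /* set "is running" */
        update_irtes(v, cpu, 1);

        pthread_mutex_unlock(&v->ir_list_lock);
}

int main(void)
{
        struct vcpu_sim v = {
                .ir_list_lock = PTHREAD_MUTEX_INITIALIZER,
                .nr_irtes = 2,
        };

        vcpu_load_sim(&v, 3);
        return 0;
}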
