Skip to content

Commit 250ce1b

Browse files
bonzini and sean-jc
authored and committed
KVM: x86/mmu: always take tdp_mmu_pages_lock
It is cheap to take tdp_mmu_pages_lock in all write-side critical sections. We already do it all the time when zapping with read_lock(), so it is not a problem to do it from the kvm_tdp_mmu_zap_all() path (aka kvm_arch_flush_shadow_all(), aka VM destruction and MMU notifier release). Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Link: https://lore.kernel.org/r/20231125083400.1399197-4-pbonzini@redhat.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 484dd27 commit 250ce1b

File tree

3 files changed

+13
-29
lines changed

3 files changed

+13
-29
lines changed

Documentation/virt/kvm/locking.rst

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -43,10 +43,9 @@ On x86:
4343

4444
- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock
4545

46-
- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
47-
kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
48-
cannot be taken without already holding kvm->arch.mmu_lock (typically with
49-
``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
46+
- kvm->arch.mmu_lock is an rwlock; critical sections for
47+
kvm->arch.tdp_mmu_pages_lock and kvm->arch.mmu_unsync_pages_lock must
48+
also take kvm->arch.mmu_lock
5049

5150
Everything else is a leaf: no other lock is taken inside the critical
5251
sections.

arch/x86/include/asm/kvm_host.h

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1407,9 +1407,8 @@ struct kvm_arch {
14071407
* the MMU lock in read mode + RCU or
14081408
* the MMU lock in write mode
14091409
*
1410-
* For writes, this list is protected by:
1411-
* the MMU lock in read mode + the tdp_mmu_pages_lock or
1412-
* the MMU lock in write mode
1410+
* For writes, this list is protected by tdp_mmu_pages_lock; see
1411+
* below for the details.
14131412
*
14141413
* Roots will remain in the list until their tdp_mmu_root_count
14151414
* drops to zero, at which point the thread that decremented the
@@ -1426,8 +1425,10 @@ struct kvm_arch {
14261425
* - possible_nx_huge_pages;
14271426
* - the possible_nx_huge_page_link field of kvm_mmu_page structs used
14281427
* by the TDP MMU
1429-
* It is acceptable, but not necessary, to acquire this lock when
1430-
* the thread holds the MMU lock in write mode.
1428+
* Because the lock is only taken within the MMU lock, strictly
1429+
* speaking it is redundant to acquire this lock when the thread
1430+
* holds the MMU lock in write mode. However it often simplifies
1431+
* the code to do so.
14311432
*/
14321433
spinlock_t tdp_mmu_pages_lock;
14331434
#endif /* CONFIG_X86_64 */

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 4 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -75,12 +75,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
7575

7676
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
7777
{
78-
/*
79-
* Either read or write is okay, but mmu_lock must be held because
80-
* writers are not required to take tdp_mmu_pages_lock.
81-
*/
82-
lockdep_assert_held(&kvm->mmu_lock);
83-
8478
if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
8579
return;
8680

@@ -281,28 +275,18 @@ static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
281275
*
282276
* @kvm: kvm instance
283277
* @sp: the page to be removed
284-
* @shared: This operation may not be running under the exclusive use of
285-
* the MMU lock and the operation must synchronize with other
286-
* threads that might be adding or removing pages.
287278
*/
288-
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
289-
bool shared)
279+
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
290280
{
291281
tdp_unaccount_mmu_page(kvm, sp);
292282

293283
if (!sp->nx_huge_page_disallowed)
294284
return;
295285

296-
if (shared)
297-
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
298-
else
299-
lockdep_assert_held_write(&kvm->mmu_lock);
300-
286+
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
301287
sp->nx_huge_page_disallowed = false;
302288
untrack_possible_nx_huge_page(kvm, sp);
303-
304-
if (shared)
305-
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
289+
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
306290
}
307291

308292
/**
@@ -331,7 +315,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
331315

332316
trace_kvm_mmu_prepare_zap_page(sp);
333317

334-
tdp_mmu_unlink_sp(kvm, sp, shared);
318+
tdp_mmu_unlink_sp(kvm, sp);
335319

336320
for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
337321
tdp_ptep_t sptep = pt + i;

0 commit comments

Comments (0)