Skip to content

Commit aa34b81

Browse files
48ca authored and sean-jc committed
KVM: Allow lockless walk of SPTEs when handing aging mmu_notifier event
It is possible to correctly do aging without taking the KVM MMU lock, or while taking it for read; add a Kconfig to let architectures do so. Architectures that select KVM_MMU_LOCKLESS_AGING are responsible for correctness.

Suggested-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: James Houghton <jthoughton@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20250204004038.1680123-3-jthoughton@google.com
[sean: massage shortlog+changelog, fix Kconfig goof and shorten name]
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 374ccd6 commit aa34b81

File tree

3 files changed

+21
-7
lines changed

3 files changed

+21
-7
lines changed

include/linux/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -267,6 +267,7 @@ struct kvm_gfn_range {
267267
union kvm_mmu_notifier_arg arg;
268268
enum kvm_gfn_range_filter attr_filter;
269269
bool may_block;
270+
bool lockless;
270271
};
271272
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
272273
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

virt/kvm/Kconfig

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,10 @@ config KVM_ELIDE_TLB_FLUSH_IF_YOUNG
104104
depends on KVM_GENERIC_MMU_NOTIFIER
105105
bool
106106

107+
config KVM_MMU_LOCKLESS_AGING
108+
depends on KVM_GENERIC_MMU_NOTIFIER
109+
bool
110+
107111
config KVM_GENERIC_MEMORY_ATTRIBUTES
108112
depends on KVM_GENERIC_MMU_NOTIFIER
109113
bool

virt/kvm/kvm_main.c

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -517,6 +517,7 @@ struct kvm_mmu_notifier_range {
517517
on_lock_fn_t on_lock;
518518
bool flush_on_ret;
519519
bool may_block;
520+
bool lockless;
520521
};
521522

522523
/*
@@ -571,6 +572,10 @@ static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
571572
IS_KVM_NULL_FN(range->handler)))
572573
return r;
573574

575+
/* on_lock will never be called for lockless walks */
576+
if (WARN_ON_ONCE(range->lockless && !IS_KVM_NULL_FN(range->on_lock)))
577+
return r;
578+
574579
idx = srcu_read_lock(&kvm->srcu);
575580

576581
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
@@ -607,15 +612,18 @@ static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
607612
gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
608613
gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
609614
gfn_range.slot = slot;
615+
gfn_range.lockless = range->lockless;
610616

611617
if (!r.found_memslot) {
612618
r.found_memslot = true;
613-
KVM_MMU_LOCK(kvm);
614-
if (!IS_KVM_NULL_FN(range->on_lock))
615-
range->on_lock(kvm);
616-
617-
if (IS_KVM_NULL_FN(range->handler))
618-
goto mmu_unlock;
619+
if (!range->lockless) {
620+
KVM_MMU_LOCK(kvm);
621+
if (!IS_KVM_NULL_FN(range->on_lock))
622+
range->on_lock(kvm);
623+
624+
if (IS_KVM_NULL_FN(range->handler))
625+
goto mmu_unlock;
626+
}
619627
}
620628
r.ret |= range->handler(kvm, &gfn_range);
621629
}
@@ -625,7 +633,7 @@ static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
625633
kvm_flush_remote_tlbs(kvm);
626634

627635
mmu_unlock:
628-
if (r.found_memslot)
636+
if (r.found_memslot && !range->lockless)
629637
KVM_MMU_UNLOCK(kvm);
630638

631639
srcu_read_unlock(&kvm->srcu, idx);
@@ -647,6 +655,7 @@ static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
647655
.on_lock = (void *)kvm_null_fn,
648656
.flush_on_ret = flush_on_ret,
649657
.may_block = false,
658+
.lockless = IS_ENABLED(CONFIG_KVM_MMU_LOCKLESS_AGING),
650659
};
651660

652661
return kvm_handle_hva_range(kvm, &range).ret;

0 commit comments

Comments (0)