Commit 4cf20d4

shvipin authored and sean-jc committed
KVM: x86/mmu: Drop per-VM zapped_obsolete_pages list
Drop the per-VM zapped_obsolete_pages list now that the usage from the defunct mmu_shrinker is gone, and instead use a local list to track pages in kvm_zap_obsolete_pages(), the sole remaining user of zapped_obsolete_pages.

Opportunistically add an assertion to verify and document that slots_lock must be held, i.e. that there can only be one active instance of kvm_zap_obsolete_pages() at any given time, and by doing so also prove that using a local list instead of a per-VM list doesn't change any functionality (beyond trivialities like list initialization).

Signed-off-by: Vipin Sharma <vipinsh@google.com>
Link: https://lore.kernel.org/r/20241101201437.1604321-2-vipinsh@google.com
[sean: split to separate patch, write changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
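The pattern behind the change is worth spelling out: a staging list whose only writer runs under an outer mutex does not need to live in the per-VM structure; an on-stack LIST_HEAD() plus a lockdep assertion on the outer lock is enough. Below is a minimal, self-contained kernel-style sketch of that pattern. It is illustrative only; every identifier in it (demo_vm, demo_page, big_lock, the gen fields, and both helpers) is hypothetical rather than KVM's.

/*
 * Illustrative sketch only: stage "obsolete" pages on a function-local
 * list and free them in one batch.  The lockdep assertion documents that
 * the caller holds the outer mutex, i.e. that at most one instance of
 * this function can be active, which is what makes the on-stack list safe.
 */
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_page {
        struct list_head link;
        unsigned long gen;
};

struct demo_vm {
        struct mutex big_lock;          /* stand-in for kvm->slots_lock */
        struct list_head active_pages;
        unsigned long gen;
};

static void demo_commit_zap(struct list_head *invalid_list)
{
        struct demo_page *p, *tmp;

        /* Free everything staged on the caller's local list. */
        list_for_each_entry_safe(p, tmp, invalid_list, link) {
                list_del(&p->link);
                kfree(p);
        }
}

static void demo_zap_obsolete_pages(struct demo_vm *vm)
{
        struct demo_page *p, *tmp;
        LIST_HEAD(invalid_list);        /* on-stack, initialized in place */

        /* Only one instance can run at a time: the caller holds big_lock. */
        lockdep_assert_held(&vm->big_lock);

        list_for_each_entry_safe(p, tmp, &vm->active_pages, link) {
                if (p->gen != vm->gen)  /* page is obsolete */
                        list_move(&p->link, &invalid_list);
        }

        demo_commit_zap(&invalid_list);
}

The assertion doubles as documentation: if a new path ever reached demo_zap_obsolete_pages() without big_lock held, lockdep would complain, which is the same property the changelog leans on to prove that dropping the per-VM list changes no functionality.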
1 parent: fe140e6 · commit: 4cf20d4

2 files changed: +5 −4 lines changed


arch/x86/include/asm/kvm_host.h

Lines changed: 0 additions & 1 deletion
@@ -1306,7 +1306,6 @@ struct kvm_arch {
 	bool pre_fault_allowed;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct list_head active_mmu_pages;
-	struct list_head zapped_obsolete_pages;
 	/*
 	 * A list of kvm_mmu_page structs that, if zapped, could possibly be
 	 * replaced by an NX huge page.  A shadow page is on this list if its

arch/x86/kvm/mmu/mmu.c

Lines changed: 5 additions & 3 deletions
@@ -6367,8 +6367,11 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	int nr_zapped, batch = 0;
+	LIST_HEAD(invalid_list);
 	bool unstable;
 
+	lockdep_assert_held(&kvm->slots_lock);
+
 restart:
 	list_for_each_entry_safe_reverse(sp, node,
 	      &kvm->arch.active_mmu_pages, link) {
@@ -6400,7 +6403,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 		}
 
 		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
-				&kvm->arch.zapped_obsolete_pages, &nr_zapped);
+				&invalid_list, &nr_zapped);
 		batch += nr_zapped;
 
 		if (unstable)
@@ -6416,7 +6419,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 	 * kvm_mmu_load()), and the reload in the caller ensure no vCPUs are
 	 * running with an obsolete MMU.
 	 */
-	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
 /*
@@ -6483,7 +6486,6 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
 	kvm->arch.shadow_mmio_value = shadow_mmio_value;
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
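Read together, the hunks above leave kvm_zap_obsolete_pages() looking roughly like the sketch below. It is assembled only from the context shown in this diff: the batching/cond_resched logic between the first two hunks is elided as a comment, and the goto restart after the unstable check is inferred from the restart: label rather than visible here.

static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	int nr_zapped, batch = 0;
	LIST_HEAD(invalid_list);	/* replaces kvm->arch.zapped_obsolete_pages */
	bool unstable;

	lockdep_assert_held(&kvm->slots_lock);

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/* ... skip non-obsolete pages, yield mmu_lock in batches ... */

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
				&invalid_list, &nr_zapped);
		batch += nr_zapped;

		if (unstable)
			goto restart;	/* inferred: re-walk after an unstable zap */
	}

	/*
	 * Zapped pages are freed only here; the MMU reload in the caller
	 * ensures no vCPU is still running with an obsolete MMU.
	 */
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}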
