
Commit 9129633

KVM: x86/mmu: Prevent installing hugepages when mem attributes are changing
When changing memory attributes on a subset of a potential hugepage, add
the hugepage to the invalidation range tracking to prevent installing a
hugepage until the attributes are fully updated. Like the actual hugepage
tracking updates in kvm_arch_post_set_memory_attributes(), process only
the head and tail pages, as any potential hugepages that are entirely
covered by the range will already be tracked.

Note, only hugepage chunks whose current attributes are NOT mixed need to
be added to the invalidation set, as mixed attributes already prevent
installing a hugepage, and it's perfectly safe to install a smaller
mapping for a gfn whose attributes aren't changing.

Fixes: 8dd2eee ("KVM: x86/mmu: Handle page fault for private memory")
Cc: stable@vger.kernel.org
Reported-by: Michael Roth <michael.roth@amd.com>
Tested-by: Michael Roth <michael.roth@amd.com>
Link: https://lore.kernel.org/r/20250430220954.522672-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 5fea0c6 commit 9129633
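To make the head/tail rounding concrete, here is a minimal userspace sketch of the arithmetic the patch performs at the 2M level. This is an illustration, not kernel code: it assumes 4KiB base pages (so one 2M hugepage spans 512 gfns) and a hypothetical attribute change over gfns [0x1100, 0x1300); round_to_2m() and PAGES_PER_2M are stand-ins for the kernel's gfn_round_for_level() and KVM_PAGES_PER_HPAGE().

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Hypothetical stand-in for KVM_PAGES_PER_HPAGE(PG_LEVEL_2M):
 * one 2M hugepage covers 512 4KiB gfns. */
#define PAGES_PER_2M 512ULL

/* Round a gfn down to its 2M-aligned hugepage base, mirroring what
 * gfn_round_for_level() does in the kernel. */
static gfn_t round_to_2m(gfn_t gfn)
{
	return gfn & ~(PAGES_PER_2M - 1);
}

int main(void)
{
	/* Attributes are changing for gfns [0x1100, 0x1300). */
	gfn_t start = 0x1100, end = 0x1300;

	gfn_t head = round_to_2m(start);   /* 0x1000 */
	gfn_t tail = round_to_2m(end - 1); /* 0x1200 */

	/* Head hugepage [0x1000, 0x1200): partially covered, since
	 * head != start, so it must join the ongoing invalidation. */
	printf("head: [%#llx, %#llx)\n", (unsigned long long)head,
	       (unsigned long long)(head + PAGES_PER_2M));

	/* Tail hugepage [0x1200, 0x1400): extends past 'end', so it is
	 * added too.  Had it equaled the head chunk, it would be skipped. */
	printf("tail: [%#llx, %#llx)\n", (unsigned long long)tail,
	       (unsigned long long)(tail + PAGES_PER_2M));
	return 0;
}

In this example, the head chunk [0x1000, 0x1200) and the tail chunk [0x1200, 0x1400) each straddle a boundary of the range being changed, so both would be added to the ongoing invalidation, unless already marked mixed.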

File tree

1 file changed (+53, -16 lines)

arch/x86/kvm/mmu/mmu.c

Lines changed: 53 additions & 16 deletions
@@ -7670,9 +7670,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 }
 
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+				int level)
+{
+	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+				 int level)
+{
+	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+			       int level)
+{
+	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
+}
+
 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 					struct kvm_gfn_range *range)
 {
+	struct kvm_memory_slot *slot = range->slot;
+	int level;
+
 	/*
 	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
 	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
@@ -7687,6 +7708,38 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
 		return false;
 
+	if (WARN_ON_ONCE(range->end <= range->start))
+		return false;
+
+	/*
+	 * If the head and tail pages of the range currently allow a hugepage,
+	 * i.e. reside fully in the slot and don't have mixed attributes, then
+	 * add each corresponding hugepage range to the ongoing invalidation,
+	 * e.g. to prevent KVM from creating a hugepage in response to a fault
+	 * for a gfn whose attributes aren't changing.  Note, only the range
+	 * of gfns whose attributes are being modified needs to be explicitly
+	 * unmapped, as that will unmap any existing hugepages.
+	 */
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		gfn_t start = gfn_round_for_level(range->start, level);
+		gfn_t end = gfn_round_for_level(range->end - 1, level);
+		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+
+		if ((start != range->start || start + nr_pages > range->end) &&
+		    start >= slot->base_gfn &&
+		    start + nr_pages <= slot->base_gfn + slot->npages &&
+		    !hugepage_test_mixed(slot, start, level))
+			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+		if (end == start)
+			continue;
+
+		if ((end + nr_pages) > range->end &&
+		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+		    !hugepage_test_mixed(slot, end, level))
+			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+	}
+
 	/* Unmap the old attribute page. */
 	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
 		range->attr_filter = KVM_FILTER_SHARED;
@@ -7696,23 +7749,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 	return kvm_unmap_gfn_range(kvm, range);
 }
 
-static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-				int level)
-{
-	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
-}
-
-static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-				 int level)
-{
-	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
-}
 
-static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-			       int level)
-{
-	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
-}
 
 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 			       gfn_t gfn, int level, unsigned long attrs)
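As context for the hunks above: the three hugepage_*_mixed() helpers are moved, unchanged, above kvm_arch_pre_set_memory_attributes() so the new head/tail logic can call hugepage_test_mixed() without a forward declaration. A rough standalone sketch of what that flag bookkeeping amounts to follows; the struct here is a simplified, illustrative stand-in for the kernel's kvm_lpage_info, where the mixed flag occupies a high bit of disallow_lpage and any non-zero value already disallows hugepages.

#include <stdbool.h>

/* Simplified stand-in for the kernel's struct kvm_lpage_info; only the
 * disallow_lpage word matters for this sketch. */
struct lpage_info {
	unsigned int disallow_lpage;
};

/* Illustrative flag bit; the kernel reserves a high bit of
 * disallow_lpage for KVM_LPAGE_MIXED_FLAG. */
#define LPAGE_MIXED_FLAG	(1U << 31)

static bool test_mixed(struct lpage_info *info)
{
	return info->disallow_lpage & LPAGE_MIXED_FLAG;
}

static void set_mixed(struct lpage_info *info)
{
	/* Any non-zero disallow_lpage blocks hugepage creation, so setting
	 * the mixed bit is sufficient to keep hugepages out of the chunk. */
	info->disallow_lpage |= LPAGE_MIXED_FLAG;
}

static void clear_mixed(struct lpage_info *info)
{
	info->disallow_lpage &= ~LPAGE_MIXED_FLAG;
}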
