Skip to content

Commit 0e3223d

Browse files
sean-jc authored and bonzini committed
KVM: x86/mmu: Use dummy root, backed by zero page, for !visible guest roots
When attempting to allocate a shadow root for a !visible guest root gfn, e.g. that resides in MMIO space, load a dummy root that is backed by the zero page instead of immediately synthesizing a triple fault shutdown (using the zero page ensures any attempt to translate memory will generate a !PRESENT fault and thus VM-Exit). Unless the vCPU is racing with memslot activity, KVM will inject a page fault due to not finding a visible slot in FNAME(walk_addr_generic), i.e. the end result is mostly same, but critically KVM will inject a fault only *after* KVM runs the vCPU with the bogus root. Waiting to inject a fault until after running the vCPU fixes a bug where KVM would bail from nested VM-Enter if L1 tried to run L2 with TDP enabled and a !visible root. Even though a bad root will *probably* lead to shutdown, (a) it's not guaranteed and (b) the CPU won't read the underlying memory until after VM-Enter succeeds. E.g. if L1 runs L2 with a VMX preemption timer value of '0', then architecturally the preemption timer VM-Exit is guaranteed to occur before the CPU executes any instruction, i.e. before the CPU needs to translate a GPA to a HPA (so long as there are no injected events with higher priority than the preemption timer). If KVM manages to get to FNAME(fetch) with a dummy root, e.g. because userspace created a memslot between installing the dummy root and handling the page fault, simply unload the MMU to allocate a new root and retry the instruction. Use KVM_REQ_MMU_FREE_OBSOLETE_ROOTS to drop the root, as invoking kvm_mmu_free_roots() while holding mmu_lock would deadlock, and conceptually the dummy root has indeeed become obsolete. The only difference versus existing usage of KVM_REQ_MMU_FREE_OBSOLETE_ROOTS is that the root has become obsolete due to memslot *creation*, not memslot deletion or movement. 
Reported-by: Reima Ishii <ishiir@g.ecc.u-tokyo.ac.jp> Cc: Yu Zhang <yu.c.zhang@linux.intel.com> Link: https://lore.kernel.org/r/20230729005200.1057358-6-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent b5b359a commit 0e3223d

File tree

4 files changed

+47
-24
lines changed

4 files changed

+47
-24
lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 23 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -3589,7 +3589,9 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
35893589
&invalid_list);
35903590

35913591
if (free_active_root) {
3592-
if (root_to_sp(mmu->root.hpa)) {
3592+
if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3593+
/* Nothing to cleanup for dummy roots. */
3594+
} else if (root_to_sp(mmu->root.hpa)) {
35933595
mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
35943596
} else if (mmu->pae_root) {
35953597
for (i = 0; i < 4; ++i) {
@@ -3637,19 +3639,6 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
36373639
}
36383640
EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
36393641

3640-
3641-
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3642-
{
3643-
int ret = 0;
3644-
3645-
if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3646-
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3647-
ret = 1;
3648-
}
3649-
3650-
return ret;
3651-
}
3652-
36533642
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
36543643
u8 level)
36553644
{
@@ -3787,8 +3776,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
37873776
root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
37883777
root_gfn = root_pgd >> PAGE_SHIFT;
37893778

3790-
if (mmu_check_root(vcpu, root_gfn))
3791-
return 1;
3779+
if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3780+
mmu->root.hpa = kvm_mmu_get_dummy_root();
3781+
return 0;
3782+
}
37923783

37933784
/*
37943785
* On SVM, reading PDPTRs might access guest memory, which might fault
@@ -3800,8 +3791,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
38003791
if (!(pdptrs[i] & PT_PRESENT_MASK))
38013792
continue;
38023793

3803-
if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
3804-
return 1;
3794+
if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
3795+
pdptrs[i] = 0;
38053796
}
38063797
}
38073798

@@ -3968,7 +3959,7 @@ static bool is_unsync_root(hpa_t root)
39683959
{
39693960
struct kvm_mmu_page *sp;
39703961

3971-
if (!VALID_PAGE(root))
3962+
if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
39723963
return false;
39733964

39743965
/*
@@ -4374,6 +4365,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
43744365
{
43754366
int r;
43764367

4368+
/* Dummy roots are used only for shadowing bad guest roots. */
4369+
if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4370+
return RET_PF_RETRY;
4371+
43774372
if (page_fault_handle_page_track(vcpu, fault))
43784373
return RET_PF_EMULATE;
43794374

@@ -4609,9 +4604,8 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
46094604
gpa_t new_pgd, union kvm_mmu_page_role new_role)
46104605
{
46114606
/*
4612-
* For now, limit the caching to 64-bit hosts+VMs in order to avoid
4613-
* having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
4614-
* later if necessary.
4607+
* Limit reuse to 64-bit hosts+VMs without "special" roots in order to
4608+
* avoid having to deal with PDPTEs and other complexities.
46154609
*/
46164610
if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
46174611
kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
@@ -5510,14 +5504,19 @@ static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
55105504

55115505
/*
55125506
* When freeing obsolete roots, treat roots as obsolete if they don't
5513-
* have an associated shadow page. This does mean KVM will get false
5507+
* have an associated shadow page, as it's impossible to determine if
5508+
* such roots are fresh or stale. This does mean KVM will get false
55145509
* positives and free roots that don't strictly need to be freed, but
55155510
* such false positives are relatively rare:
55165511
*
5517-
* (a) only PAE paging and nested NPT has roots without shadow pages
5512+
* (a) only PAE paging and nested NPT have roots without shadow pages
5513+
* (or any shadow paging flavor with a dummy root, see note below)
55185514
* (b) remote reloads due to a memslot update obsoletes _all_ roots
55195515
* (c) KVM doesn't track previous roots for PAE paging, and the guest
55205516
* is unlikely to zap an in-use PGD.
5517+
*
5518+
* Note! Dummy roots are unique in that they are obsoleted by memslot
5519+
* _creation_! See also FNAME(fetch).
55215520
*/
55225521
sp = root_to_sp(root_hpa);
55235522
return !sp || is_obsolete_sp(kvm, sp);

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,16 @@
3636
#define INVALID_PAE_ROOT 0
3737
#define IS_VALID_PAE_ROOT(x) (!!(x))
3838

39+
static inline hpa_t kvm_mmu_get_dummy_root(void)
40+
{
41+
return my_zero_pfn(0) << PAGE_SHIFT;
42+
}
43+
44+
static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
45+
{
46+
return is_zero_pfn(shadow_page >> PAGE_SHIFT);
47+
}
48+
3949
typedef u64 __rcu *tdp_ptep_t;
4050

4151
struct kvm_mmu_page {

arch/x86/kvm/mmu/paging_tmpl.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -651,6 +651,17 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
651651
if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
652652
goto out_gpte_changed;
653653

654+
/*
655+
* Load a new root and retry the faulting instruction in the extremely
656+
* unlikely scenario that the guest root gfn became visible between
657+
* loading a dummy root and handling the resulting page fault, e.g. if
658+
* userspace creates a memslot in the interim.
659+
*/
660+
if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
661+
kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
662+
goto out_gpte_changed;
663+
}
664+
654665
for_each_shadow_entry(vcpu, fault->addr, it) {
655666
gfn_t table_gfn;
656667

arch/x86/kvm/mmu/spte.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -238,6 +238,9 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
238238

239239
static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
240240
{
241+
if (kvm_mmu_is_dummy_root(root))
242+
return NULL;
243+
241244
/*
242245
* The "root" may be a special root, e.g. a PAE entry, treat it as a
243246
* SPTE to ensure any non-PA bits are dropped.

0 commit comments

Comments
 (0)