Skip to content

Commit 94faba8

Browse files
yamahata authored and bonzini committed
KVM: x86/tdp_mmu: Propagate tearing down mirror page tables
Integrate hooks for mirroring page table operations for cases where TDX will zap PTEs or free page tables. Like other Coco technologies, TDX has the concept of private and shared memory. For TDX the private and shared mappings are managed on separate EPT roots. The private half is managed indirectly though calls into a protected runtime environment called the TDX module, where the shared half is managed within KVM in normal page tables. Since calls into the TDX module are relatively slow, walking private page tables by making calls into the TDX module would not be efficient. Because of this, previous changes have taught the TDP MMU to keep a mirror root, which is separate, unmapped TDP root that private operations can be directed to. Currently this root is disconnected from the guest. Now add plumbing to propagate changes to the "external" page tables being mirrored. Just create the x86_ops for now, leave plumbing the operations into the TDX module for future patches. Add two operations for tearing down page tables, one for freeing page tables (free_external_spt) and one for zapping PTEs (remove_external_spte). Define them such that remove_external_spte will perform a TLB flush as well. (in TDX terms "ensure there are no active translations"). TDX MMU support will exclude certain MMU operations, so only plug in the mirroring x86 ops where they will be needed. For zapping/freeing, only hook tdp_mmu_iter_set_spte() which is used for mapping and linking PTs. Don't bother hooking tdp_mmu_set_spte_atomic() as it is only used for zapping PTEs in operations unsupported by TDX: zapping collapsible PTEs and kvm_mmu_zap_all_fast(). In previous changes to address races around concurrent populating using tdp_mmu_set_spte_atomic(), a solution was introduced to temporarily set FROZEN_SPTE in the mirrored page tables while performing the external operations. 
Such a solution is not needed for the tear down paths in TDX as these will always be performed with the mmu_lock held for write. Sprinkle some KVM_BUG_ON()s to reflect this. Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com> Co-developed-by: Kai Huang <kai.huang@intel.com> Signed-off-by: Kai Huang <kai.huang@intel.com> Co-developed-by: Yan Zhao <yan.y.zhao@intel.com> Signed-off-by: Yan Zhao <yan.y.zhao@intel.com> Co-developed-by: Rick Edgecombe <rick.p.edgecombe@intel.com> Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com> Message-ID: <20240718211230.1492011-16-rick.p.edgecombe@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 77ac707 commit 94faba8

File tree

3 files changed

+60
-1
lines changed

3 files changed

+60
-1
lines changed

arch/x86/include/asm/kvm-x86-ops.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,8 @@ KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
9696
KVM_X86_OP(load_mmu_pgd)
9797
KVM_X86_OP_OPTIONAL(link_external_spt)
9898
KVM_X86_OP_OPTIONAL(set_external_spte)
99+
KVM_X86_OP_OPTIONAL(free_external_spt)
100+
KVM_X86_OP_OPTIONAL(remove_external_spte)
99101
KVM_X86_OP(has_wbinvd_exit)
100102
KVM_X86_OP(get_l2_tsc_offset)
101103
KVM_X86_OP(get_l2_tsc_multiplier)

arch/x86/include/asm/kvm_host.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1766,6 +1766,14 @@ struct kvm_x86_ops {
17661766
int (*set_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
17671767
kvm_pfn_t pfn_for_gfn);
17681768

1769+
/* Update external page tables for page table about to be freed. */
1770+
int (*free_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1771+
void *external_spt);
1772+
1773+
/* Update external page table from spte getting removed, and flush TLB. */
1774+
int (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
1775+
kvm_pfn_t pfn_for_gfn);
1776+
17691777
bool (*has_wbinvd_exit)(void);
17701778

17711779
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 50 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -340,6 +340,29 @@ static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
340340
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
341341
}
342342

/*
 * Tell the external (mirrored) page-table backing — per the x86_ops contract,
 * the implementation zaps the translation for @gfn and flushes the TLB — that
 * the leaf SPTE @old_spte at @level is being removed.  Non-leaf SPTEs are a
 * no-op here; page-table pages are torn down via free_external_spt() instead.
 */
static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
				 int level)
{
	kvm_pfn_t old_pfn = spte_to_pfn(old_spte);
	int ret;

	/*
	 * External (TDX) SPTEs are limited to PG_LEVEL_4K, and external
	 * PTs are removed in a special order, involving free_external_spt().
	 * But remove_external_spte() will be called on non-leaf PTEs via
	 * __tdp_mmu_zap_root(), so avoid the error the former would return
	 * in this case.
	 */
	if (!is_last_spte(old_spte, level))
		return;

	/* Zapping a leaf SPTE is allowed only when the write lock is held. */
	lockdep_assert_held_write(&kvm->mmu_lock);
	/* Because the write lock is held, the operation should succeed. */
	ret = static_call(kvm_x86_remove_external_spte)(kvm, gfn, level, old_pfn);
	KVM_BUG_ON(ret, kvm);
}
343366
/**
344367
* handle_removed_pt() - handle a page table removed from the TDP structure
345368
*
@@ -435,6 +458,23 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
435458
}
436459
handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
437460
old_spte, FROZEN_SPTE, level, shared);
461+
462+
if (is_mirror_sp(sp)) {
463+
KVM_BUG_ON(shared, kvm);
464+
remove_external_spte(kvm, gfn, old_spte, level);
465+
}
466+
}
467+
468+
if (is_mirror_sp(sp) &&
469+
WARN_ON(static_call(kvm_x86_free_external_spt)(kvm, base_gfn, sp->role.level,
470+
sp->external_spt))) {
471+
/*
472+
* Failed to free page table page in mirror page table and
473+
* there is nothing to do further.
474+
* Intentionally leak the page to prevent the kernel from
475+
* accessing the encrypted page.
476+
*/
477+
sp->external_spt = NULL;
438478
}
439479

440480
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -608,6 +648,13 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct kvm *kvm,
608648
if (is_mirror_sptep(iter->sptep) && !is_frozen_spte(new_spte)) {
609649
int ret;
610650

651+
/*
652+
* Users of atomic zapping don't operate on mirror roots,
653+
* so don't handle it and bug the VM if it's seen.
654+
*/
655+
if (KVM_BUG_ON(!is_shadow_present_pte(new_spte), kvm))
656+
return -EBUSY;
657+
611658
ret = set_external_spte_present(kvm, iter->sptep, iter->gfn,
612659
iter->old_spte, new_spte, iter->level);
613660
if (ret)
@@ -700,8 +747,10 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
700747
* Users that do non-atomic setting of PTEs don't operate on mirror
701748
* roots, so don't handle it and bug the VM if it's seen.
702749
*/
703-
if (is_mirror_sptep(sptep))
750+
if (is_mirror_sptep(sptep)) {
704751
KVM_BUG_ON(is_shadow_present_pte(new_spte), kvm);
752+
remove_external_spte(kvm, gfn, old_spte, level);
753+
}
705754

706755
return old_spte;
707756
}

0 commit comments

Comments
 (0)