
Commit 2608f10

yamahata authored and bonzini committed
KVM: x86/tdp_mmu: Add a helper function to walk down the TDP MMU
Export a function to walk down the TDP without modifying it and simply check if a GPA is mapped.

Future changes will support pre-populating TDX private memory. In order to implement this, KVM will need to check if a given GFN is already pre-populated in the mirrored EPT. [1]

There is already a TDP MMU walker, kvm_tdp_mmu_get_walk(), for use within the KVM MMU that almost does what is required. However, to make sense of the results, MMU-internal PTE helpers are needed. Refactor the code to provide a helper that can be used outside of the KVM MMU code.

Refactoring the KVM page fault handler to support this lookup usage was also considered, but it was an awkward fit.

kvm_tdp_mmu_gpa_is_mapped() is based on a diff by Paolo Bonzini.

Link: https://lore.kernel.org/kvm/ZfBkle1eZFfjPI8l@google.com/ [1]
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Co-developed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241112073457.22011-1-yan.y.zhao@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
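For illustration, a minimal sketch of how a future pre-population path might use the two helpers exported by this commit. The function name tdx_prepopulate_gpa() and the PFERR_WRITE_MASK error code are hypothetical placeholders, not part of this commit:

/*
 * Hypothetical caller, for illustration only: check whether a GPA is
 * already mapped before faulting it in. kvm_tdp_mmu_gpa_is_mapped()
 * asserts that mmu_lock is held; kvm_tdp_map_page() handles the fault
 * path itself, so it is called after the lock is dropped.
 */
static int tdx_prepopulate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	bool mapped;
	u8 level;

	read_lock(&vcpu->kvm->mmu_lock);
	mapped = kvm_tdp_mmu_gpa_is_mapped(vcpu, gpa);
	read_unlock(&vcpu->kvm->mmu_lock);
	if (mapped)
		return 0;

	/* The error code here is illustrative; a real caller would pick
	 * the fault flags appropriate to its use case. */
	return kvm_tdp_map_page(vcpu, gpa, PFERR_WRITE_MASK, &level);
}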
1 parent ae80c7d commit 2608f10

3 files changed: +36 −7 lines changed

arch/x86/kvm/mmu.h

Lines changed: 3 additions & 0 deletions

@@ -253,6 +253,9 @@ extern bool tdp_mmu_enabled;
 #define tdp_mmu_enabled false
 #endif
 
+bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa);
+int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level);
+
 static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
 {
 	return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
arch/x86/kvm/mmu/mmu.c

Lines changed: 1 addition & 2 deletions

@@ -4685,8 +4685,7 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	return direct_page_fault(vcpu, fault);
 }
 
-static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
-			    u8 *level)
+int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level)
 {
 	int r;
arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 32 additions & 5 deletions

@@ -1894,16 +1894,13 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
  *
  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
  */
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
-			 int *root_level)
+static int __kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+				  struct kvm_mmu_page *root)
 {
-	struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
 	struct tdp_iter iter;
 	gfn_t gfn = addr >> PAGE_SHIFT;
 	int leaf = -1;
 
-	*root_level = vcpu->arch.mmu->root_role.level;
-
 	tdp_mmu_for_each_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
 		leaf = iter.level;
 		sptes[leaf] = iter.old_spte;
@@ -1912,6 +1909,36 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 	return leaf;
 }
 
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+			 int *root_level)
+{
+	struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+	*root_level = vcpu->arch.mmu->root_role.level;
+
+	return __kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root);
+}
+
+bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa)
+{
+	struct kvm *kvm = vcpu->kvm;
+	bool is_direct = kvm_is_addr_direct(kvm, gpa);
+	hpa_t root = is_direct ? vcpu->arch.mmu->root.hpa :
+				 vcpu->arch.mmu->mirror_root_hpa;
+	u64 sptes[PT64_ROOT_MAX_LEVEL + 1], spte;
+	int leaf;
+
+	lockdep_assert_held(&kvm->mmu_lock);
+	rcu_read_lock();
+	leaf = __kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, root_to_sp(root));
+	rcu_read_unlock();
+	if (leaf < 0)
+		return false;
+
+	spte = sptes[leaf];
+	return is_shadow_present_pte(spte) && is_last_spte(spte, leaf);
+}
+EXPORT_SYMBOL_GPL(kvm_tdp_mmu_gpa_is_mapped);
+
 /*
  * Returns the last level spte pointer of the shadow page walk for the given
  * gpa, and sets *spte to the spte value. This spte may be non-preset. If no
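As the commit message notes, interpreting raw walk results requires MMU-internal PTE helpers. A minimal sketch of what a caller of the still-internal kvm_tdp_mmu_get_walk() looks like, mirroring the logic of kvm_tdp_mmu_gpa_is_mapped() above (illustrative only; 'vcpu' and 'gpa' are assumed to be in scope):

/*
 * Illustrative only: a lockless TDP walk plus the MMU-internal PTE
 * checks needed to decide whether 'gpa' resolves to a mapped leaf.
 */
u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
int root_level, leaf;
bool mapped = false;

kvm_tdp_mmu_walk_lockless_begin();
leaf = kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, &root_level);
kvm_tdp_mmu_walk_lockless_end();

if (leaf >= 0)
	/* Present leaf SPTE at level 'leaf' means the GPA is mapped. */
	mapped = is_shadow_present_pte(sptes[leaf]) &&
		 is_last_spte(sptes[leaf], leaf);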
