
Commit e23186d

yamahata authored and bonzini committed
KVM: x86/tdp_mmu: Take struct kvm in iter loops
Add a struct kvm argument to the TDP MMU iterators.

Future changes will want to change how the iterator behaves based on a
member of struct kvm. Change the signature and callers of the iterator
loop helpers in a separate patch to make the future one easier to review.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Message-ID: <20240718211230.1492011-8-rick.p.edgecombe@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 243e13e commit e23186d
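
Editorial note: the change below is purely mechanical at this stage. The kvm argument is threaded through every iterator macro and call site but not yet consumed, so the later patch that actually uses it stays small and reviewable. A minimal standalone sketch of that pattern, with invented names (vm_ctx, for_each_slot) that are not KVM code:

#include <stdio.h>

struct vm_ctx { int id; };	/* hypothetical stand-in for struct kvm */

/*
 * The vm parameter is accepted but deliberately unused: callers are
 * all converted now, and a later change can start consulting vm
 * without touching the call sites again.
 */
#define for_each_slot(i, vm, nslots) \
	for ((i) = 0; (i) < (nslots); (i)++)

int main(void)
{
	struct vm_ctx vm = { .id = 42 };
	int i;

	for_each_slot(i, &vm, 4)	/* caller already passes the context */
		printf("slot %d\n", i);
	return 0;
}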

2 files changed, 22 insertions(+), 22 deletions(-)

arch/x86/kvm/mmu/tdp_iter.h

Lines changed: 3 additions & 3 deletions
@@ -122,13 +122,13 @@ struct tdp_iter {
  * Iterates over every SPTE mapping the GFN range [start, end) in a
  * preorder traversal.
  */
-#define for_each_tdp_pte_min_level(iter, root, min_level, start, end) \
+#define for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) \
 	for (tdp_iter_start(&iter, root, min_level, start); \
 	     iter.valid && iter.gfn < end; \
 	     tdp_iter_next(&iter))
 
-#define for_each_tdp_pte(iter, root, start, end) \
-	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)
+#define for_each_tdp_pte(iter, kvm, root, start, end) \
+	for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end)
 
 tdp_ptep_t spte_to_child_pt(u64 pte, int level);
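Because for_each_tdp_pte_min_level() is a macro whose expansion does not (yet) reference kvm, the new argument is never even evaluated in this patch. A small self-contained demonstration of that preprocessor behavior, using invented names (iterate, touch):

#include <stdio.h>

/* Mirrors the patch: vm is accepted but absent from the expansion. */
#define iterate(i, vm, n) \
	for ((i) = 0; (i) < (n); (i)++)

static int touch(void)
{
	puts("side effect");	/* never printed */
	return 0;
}

int main(void)
{
	int i;

	/*
	 * touch() is never called: an argument that does not appear in
	 * a macro's expansion is dropped at preprocessing time, unlike
	 * an argument to a real function.
	 */
	iterate(i, touch(), 3)
		printf("i = %d\n", i);
	return 0;
}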

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 19 additions & 19 deletions
@@ -625,18 +625,18 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 			  iter->gfn, iter->level);
 }
 
-#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
-	for_each_tdp_pte(_iter, _root, _start, _end)
+#define tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end) \
+	for_each_tdp_pte(_iter, _kvm, _root, _start, _end)
 
-#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
-	tdp_root_for_each_pte(_iter, _root, _start, _end) \
+#define tdp_root_for_each_leaf_pte(_iter, _kvm, _root, _start, _end) \
+	tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end) \
 		if (!is_shadow_present_pte(_iter.old_spte) || \
 		    !is_last_spte(_iter.old_spte, _iter.level)) \
 			continue; \
 		else
 
-#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \
-	for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
+#define tdp_mmu_for_each_pte(_iter, _kvm, _mmu, _start, _end) \
+	for_each_tdp_pte(_iter, _kvm, root_to_sp(_mmu->root.hpa), _start, _end)
 
 static inline bool __must_check tdp_mmu_iter_need_resched(struct kvm *kvm,
 							  struct tdp_iter *iter)
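The tdp_root_for_each_leaf_pte() wrapper above relies on a classic filtering-macro idiom: the trailing if (...) continue; else makes the statement the caller attaches become the else branch, so non-present and non-leaf SPTEs are skipped transparently. The same trick in a standalone, runnable form (for_each_even is invented for the illustration):

#include <stdio.h>

#define for_each_int(i, n) \
	for ((i) = 0; (i) < (n); (i)++)

/*
 * Same shape as tdp_root_for_each_leaf_pte(): reuse the base loop,
 * skip non-matching entries with continue, and leave a dangling else
 * that captures the caller's loop body.
 */
#define for_each_even(i, n) \
	for_each_int(i, n) \
		if ((i) % 2) \
			continue; \
		else

int main(void)
{
	int i;

	for_each_even(i, 10)
		printf("%d\n", i);	/* prints 0 2 4 6 8 */
	return 0;
}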
@@ -708,7 +708,7 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
 	gfn_t end = tdp_mmu_max_gfn_exclusive();
 	gfn_t start = 0;
 
-	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, zap_level, start, end) {
 retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
 			continue;
@@ -812,7 +812,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	rcu_read_lock();
 
-	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) {
 		if (can_yield &&
 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
 			flush = false;
@@ -1086,7 +1086,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 
 	rcu_read_lock();
 
-	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
+	tdp_mmu_for_each_pte(iter, kvm, mmu, fault->gfn, fault->gfn + 1) {
 		int r;
 
 		if (fault->nx_huge_page_workaround_enabled)
@@ -1212,7 +1212,7 @@ static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
 	for_each_valid_tdp_mmu_root(kvm, root, range->slot->as_id) {
 		guard(rcu)();
 
-		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) {
+		tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
 			if (!is_accessed_spte(iter.old_spte))
 				continue;
 
@@ -1253,7 +1253,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
 
-	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) {
 retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
@@ -1372,7 +1372,7 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
 	 * and then splitting each of those to 512 4KB pages).
 	 */
-	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, target_level + 1, start, end) {
retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
 			continue;
@@ -1470,7 +1470,7 @@ static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	rcu_read_lock();
 
-	tdp_root_for_each_pte(iter, root, start, end) {
+	tdp_root_for_each_pte(iter, kvm, root, start, end) {
retry:
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
@@ -1518,7 +1518,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	rcu_read_lock();
 
-	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
+	tdp_root_for_each_leaf_pte(iter, kvm, root, gfn + __ffs(mask),
 				   gfn + BITS_PER_LONG) {
 		if (!mask)
 			break;
@@ -1572,7 +1572,7 @@ static int tdp_mmu_make_huge_spte(struct kvm *kvm,
 	gfn_t end = start + KVM_PAGES_PER_HPAGE(parent->level);
 	struct tdp_iter iter;
 
-	tdp_root_for_each_leaf_pte(iter, root, start, end) {
+	tdp_root_for_each_leaf_pte(iter, kvm, root, start, end) {
 		/*
 		 * Use the parent iterator when checking for forward progress so
 		 * that KVM doesn't get stuck continuously trying to yield (i.e.
@@ -1606,7 +1606,7 @@ static void recover_huge_pages_range(struct kvm *kvm,
 
 	rcu_read_lock();
 
-	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_2M, start, end) {
retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
 			flush = false;
@@ -1687,7 +1687,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	rcu_read_lock();
 
-	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
+	for_each_tdp_pte_min_level(iter, kvm, root, min_level, gfn, gfn + 1) {
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -1742,7 +1742,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 
 	*root_level = vcpu->arch.mmu->root_role.level;
 
-	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+	tdp_mmu_for_each_pte(iter, vcpu->kvm, mmu, gfn, gfn + 1) {
 		leaf = iter.level;
 		sptes[leaf] = iter.old_spte;
 	}
@@ -1768,7 +1768,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	tdp_ptep_t sptep = NULL;
 
-	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+	tdp_mmu_for_each_pte(iter, vcpu->kvm, mmu, gfn, gfn + 1) {
 		*spte = iter.old_spte;
 		sptep = iter.sptep;
 	}
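
Where this is headed, for orientation only: the commit message says future changes will vary iterator behavior based on a member of struct kvm, but this commit does not show how. Purely as a hypothetical sketch (vm_ctx and walk_range are invented here, not the kernel's eventual design), a macro that consumes its VM argument might, for example, bias the walk range:

#include <stdio.h>

struct vm_ctx { unsigned long gfn_bias; };	/* invented */

/* Hypothetical follow-up: the previously unused vm argument now
 * offsets where the walk starts. */
#define walk_range(g, vm, start, end) \
	for ((g) = (start) + (vm)->gfn_bias; (g) < (end); (g)++)

int main(void)
{
	struct vm_ctx vm = { .gfn_bias = 2 };
	unsigned long g;

	walk_range(g, &vm, 0UL, 5UL)	/* visits 2, 3, 4 */
		printf("gfn %lu\n", g);
	return 0;
}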
