@@ -625,18 +625,18 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 				  iter->gfn, iter->level);
 }

-#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
-	for_each_tdp_pte(_iter, _root, _start, _end)
+#define tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end) \
+	for_each_tdp_pte(_iter, _kvm, _root, _start, _end)

-#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)		\
-	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
+#define tdp_root_for_each_leaf_pte(_iter, _kvm, _root, _start, _end)	\
+	tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end)		\
 		if (!is_shadow_present_pte(_iter.old_spte) ||		\
 		    !is_last_spte(_iter.old_spte, _iter.level))		\
 			continue;					\
 		else

-#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)			\
-	for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
+#define tdp_mmu_for_each_pte(_iter, _kvm, _mmu, _start, _end)		\
+	for_each_tdp_pte(_iter, _kvm, root_to_sp(_mmu->root.hpa), _start, _end)

 static inline bool __must_check tdp_mmu_iter_need_resched(struct kvm *kvm,
 							   struct tdp_iter *iter)
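For readability, the three wrapper macros as they read once the hunk above is applied, with the diff markup stripped. This is only a consolidated view of the '+' lines; the underlying for_each_tdp_pte() is defined outside this diff (in tdp_iter.h) and is assumed, not shown, to consume the new _kvm argument.

#define tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end) \
	for_each_tdp_pte(_iter, _kvm, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _kvm, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

#define tdp_mmu_for_each_pte(_iter, _kvm, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, _kvm, root_to_sp(_mmu->root.hpa), _start, _end)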
@@ -708,7 +708,7 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
 	gfn_t end = tdp_mmu_max_gfn_exclusive();
 	gfn_t start = 0;

-	for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, zap_level, start, end) {
 retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
 			continue;
@@ -812,7 +812,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,

 	rcu_read_lock();

-	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) {
 		if (can_yield &&
 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
 			flush = false;
@@ -1086,7 +1086,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)

 	rcu_read_lock();

-	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
+	tdp_mmu_for_each_pte(iter, kvm, mmu, fault->gfn, fault->gfn + 1) {
 		int r;

 		if (fault->nx_huge_page_workaround_enabled)
@@ -1212,7 +1212,7 @@ static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
 	for_each_valid_tdp_mmu_root(kvm, root, range->slot->as_id) {
 		guard(rcu)();

-		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) {
+		tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
 			if (!is_accessed_spte(iter.old_spte))
 				continue;

@@ -1253,7 +1253,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,

 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

-	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) {
 retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
@@ -1372,7 +1372,7 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
 	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
 	 * and then splitting each of those to 512 4KB pages).
 	 */
-	for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, target_level + 1, start, end) {
 retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
 			continue;
@@ -1470,7 +1470,7 @@ static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,

 	rcu_read_lock();

-	tdp_root_for_each_pte(iter, root, start, end) {
+	tdp_root_for_each_pte(iter, kvm, root, start, end) {
 retry:
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
@@ -1518,7 +1518,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,

 	rcu_read_lock();

-	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
+	tdp_root_for_each_leaf_pte(iter, kvm, root, gfn + __ffs(mask),
 				   gfn + BITS_PER_LONG) {
 		if (!mask)
 			break;
@@ -1572,7 +1572,7 @@ static int tdp_mmu_make_huge_spte(struct kvm *kvm,
 	gfn_t end = start + KVM_PAGES_PER_HPAGE(parent->level);
 	struct tdp_iter iter;

-	tdp_root_for_each_leaf_pte(iter, root, start, end) {
+	tdp_root_for_each_leaf_pte(iter, kvm, root, start, end) {
 		/*
 		 * Use the parent iterator when checking for forward progress so
 		 * that KVM doesn't get stuck continuously trying to yield (i.e.
@@ -1606,7 +1606,7 @@ static void recover_huge_pages_range(struct kvm *kvm,

 	rcu_read_lock();

-	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
+	for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_2M, start, end) {
 retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
 			flush = false;
@@ -1687,7 +1687,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,

 	rcu_read_lock();

-	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
+	for_each_tdp_pte_min_level(iter, kvm, root, min_level, gfn, gfn + 1) {
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -1742,7 +1742,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,

 	*root_level = vcpu->arch.mmu->root_role.level;

-	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+	tdp_mmu_for_each_pte(iter, vcpu->kvm, mmu, gfn, gfn + 1) {
 		leaf = iter.level;
 		sptes[leaf] = iter.old_spte;
 	}
@@ -1768,7 +1768,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	tdp_ptep_t sptep = NULL;

-	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+	tdp_mmu_for_each_pte(iter, vcpu->kvm, mmu, gfn, gfn + 1) {
 		*spte = iter.old_spte;
 		sptep = iter.sptep;
 	}
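As a usage illustration only (not part of the commit), a minimal walker sketched against the updated convention: any caller that iterates a root now threads its struct kvm through the macro, exactly as the hunks above do for the in-tree walkers. The function name and the pr_info() reporting are hypothetical, and the caller is assumed to hold mmu_lock as the in-tree walkers do.

/*
 * Hypothetical example, not in the patch: dump the present leaf SPTEs in
 * [start, end) under the updated iterator signature, which now takes @kvm.
 */
static void example_dump_leaf_sptes(struct kvm *kvm, struct kvm_mmu_page *root,
				    gfn_t start, gfn_t end)
{
	struct tdp_iter iter;

	rcu_read_lock();

	/* kvm is now passed alongside the root, per the '+' lines above. */
	tdp_root_for_each_leaf_pte(iter, kvm, root, start, end)
		pr_info("gfn 0x%llx level %d spte 0x%llx\n",
			iter.gfn, iter.level, iter.old_spte);

	rcu_read_unlock();
}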