@@ -155,23 +155,20 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * If shared is set, this function is operating under the MMU lock in read
  * mode.
  */
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
-	     _root;								\
-	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
-		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
-		    kvm_mmu_page_as_id(_root) != _as_id) {			\
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)	\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
+	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;		\
+	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
+		if (kvm_mmu_page_as_id(_root) != _as_id) {			\
 		} else
 
-#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
-	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
+#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)		\
+	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
 
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)			\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, false);			\
-	     _root;								\
-	     _root = tdp_mmu_next_root(_kvm, _root, false))			\
-		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {		\
-		} else
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)				\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, false);			\
+	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;		\
+	     _root = tdp_mmu_next_root(_kvm, _root, false))
 
 /*
  * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
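Aside (not part of the patch): the rework above leans on a GNU C idiom, a statement expression joined to the loop condition with the comma operator, so the lockdep check runs before every iteration instead of being buried in the `if (...) { } else` filter. Below is a minimal user-space sketch of that pattern; `locked`, `assert_locked()` and `for_each_item()` are hypothetical stand-ins for the mmu_lock state, lockdep_assert_held() and the root iterator.

#include <assert.h>
#include <stdio.h>

static int locked = 1;                 /* stand-in for "mmu_lock is held" */

#define assert_locked()  assert(locked)

/* The check runs on every pass; the loop still terminates on (_i) < (_n). */
#define for_each_item(_i, _n)                                  \
        for ((_i) = 0;                                         \
             ({ assert_locked(); }), (_i) < (_n);              \
             (_i)++)

int main(void)
{
        int i;

        for_each_item(i, 3)
                printf("visiting item %d\n", i);

        return 0;
}

The kernel macros apply the same trick to lockdep_assert_held(), which only checks that mmu_lock is held in some mode; that is why the write-side callers in the hunks below gain an explicit lockdep_assert_held_write() of their own.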
@@ -840,7 +837,8 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
 {
 	struct kvm_mmu_page *root;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+	lockdep_assert_held_write(&kvm->mmu_lock);
+	for_each_tdp_mmu_root_yield_safe(kvm, root)
 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
 
 	return flush;
@@ -862,7 +860,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 	 * is being destroyed or the userspace VMM has exited. In both cases,
 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
 	 */
-	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+	lockdep_assert_held_write(&kvm->mmu_lock);
+	for_each_tdp_mmu_root_yield_safe(kvm, root)
 		tdp_mmu_zap_root(kvm, root, false);
 }
 
@@ -876,7 +875,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 
 	read_lock(&kvm->mmu_lock);
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
+	for_each_tdp_mmu_root_yield_safe(kvm, root) {
 		if (!root->tdp_mmu_scheduled_root_to_zap)
 			continue;
 
@@ -1133,7 +1132,7 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
 	struct kvm_mmu_page *root;
 
-	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
+	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
 		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
 					  range->may_block, flush);
 
@@ -1322,7 +1321,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
 
-	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
 			     slot->base_gfn + slot->npages, min_level);
 
@@ -1354,6 +1353,8 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
 {
 	struct kvm_mmu_page *sp;
 
+	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+
 	/*
 	 * Since we are allocating while under the MMU lock we have to be
 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
@@ -1504,8 +1505,7 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 	int r = 0;
 
 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
-
-	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
+	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
 		if (r) {
 			kvm_tdp_mmu_put_root(kvm, root);
@@ -1569,8 +1569,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 	bool spte_set = false;
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
-
-	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
 				  slot->base_gfn + slot->npages);
 
@@ -1704,8 +1703,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	struct kvm_mmu_page *root;
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
-
-	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		zap_collapsible_spte_range(kvm, root, slot);
 }
 