@@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 	tdp_mmu_free_sp(sp);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-			  bool shared)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+	/*
+	 * Either read or write is okay, but mmu_lock must be held because
+	 * writers are not required to take tdp_mmu_pages_lock.
+	 */
+	lockdep_assert_held(&kvm->mmu_lock);
 
 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
 		return;
@@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
  */
 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 					      struct kvm_mmu_page *prev_root,
-					      bool shared, bool only_valid)
+					      bool only_valid)
 {
 	struct kvm_mmu_page *next_root;
 
+	/*
+	 * While the roots themselves are RCU-protected, fields such as
+	 * role.invalid are protected by mmu_lock.
+	 */
+	lockdep_assert_held(&kvm->mmu_lock);
+
 	rcu_read_lock();
 
 	if (prev_root)
@@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	rcu_read_unlock();
 
 	if (prev_root)
-		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
+		kvm_tdp_mmu_put_root(kvm, prev_root);
 
 	return next_root;
 }
@@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * recent root. (Unless keeping a live reference is desirable.)
  *
  * If shared is set, this function is operating under the MMU lock in read
- * mode. In the unlikely event that this thread must free a root, the lock
- * will be temporarily dropped and reacquired in write mode.
+ * mode.
  */
 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
 	     _root;								\
-	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
+	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
 		} else
@@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
 
 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)			\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);		\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, false);			\
 	     _root;								\
-	     _root = tdp_mmu_next_root(_kvm, _root, _shared, false))		\
+	     _root = tdp_mmu_next_root(_kvm, _root, false))			\
 		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {		\
 		} else
 
@@ -891,7 +899,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 		 * the root must be reachable by mmu_notifiers while it's being
 		 * zapped
 		 */
-		kvm_tdp_mmu_put_root(kvm, root, true);
+		kvm_tdp_mmu_put_root(kvm, root);
 	}
 
 	read_unlock(&kvm->mmu_lock);
@@ -1500,7 +1508,7 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
 		if (r) {
-			kvm_tdp_mmu_put_root(kvm, root, shared);
+			kvm_tdp_mmu_put_root(kvm, root);
 			break;
 		}
 	}