
Commit 5f3c8c9

bonzini authored and sean-jc committed
KVM: x86/mmu: remove unnecessary "bool shared" argument from functions
Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root need to know if the lock is taken for read or write. Either way, protection is achieved via RCU and tdp_mmu_pages_lock. Remove the argument and just assert that the lock is taken.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lore.kernel.org/r/20231125083400.1399197-2-pbonzini@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 45a61eb commit 5f3c8c9

File tree: 3 files changed, 23 insertions(+), 16 deletions(-)
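
For background on the assertion swap in the diffs below: the mode-specific helper kvm_lockdep_assert_mmu_lock_held(), which these two functions stop using (the iteration macros still call it), asserts read or write mode depending on the flag, whereas plain lockdep_assert_held() is satisfied by either acquisition of the rwlock. A rough sketch of that helper, reconstructed for illustration rather than quoted from this commit:

/*
 * Illustrative sketch of the mode-specific assertion helper that forced
 * callers to pass "shared"; reconstructed from context, not part of this
 * diff.  lockdep_assert_held_read()/_write() demand a specific mode,
 * while lockdep_assert_held() accepts either.
 */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	/* Returns true so the iteration macros can use it in an if (). */
	return true;
}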

arch/x86/kvm/mmu/mmu.c

Lines changed: 1 addition & 1 deletion

@@ -3580,7 +3580,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 		return;
 
 	if (is_tdp_mmu_page(sp))
-		kvm_tdp_mmu_put_root(kvm, sp, false);
+		kvm_tdp_mmu_put_root(kvm, sp);
 	else if (!--sp->root_count && sp->role.invalid)
 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 21 additions & 13 deletions

@@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 	tdp_mmu_free_sp(sp);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-			  bool shared)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+	/*
+	 * Either read or write is okay, but mmu_lock must be held because
+	 * writers are not required to take tdp_mmu_pages_lock.
+	 */
+	lockdep_assert_held(&kvm->mmu_lock);
 
 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
 		return;
@@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
  */
 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 					      struct kvm_mmu_page *prev_root,
-					      bool shared, bool only_valid)
+					      bool only_valid)
 {
 	struct kvm_mmu_page *next_root;
 
+	/*
+	 * While the roots themselves are RCU-protected, fields such as
+	 * role.invalid are protected by mmu_lock.
+	 */
+	lockdep_assert_held(&kvm->mmu_lock);
+
 	rcu_read_lock();
 
 	if (prev_root)
@@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	rcu_read_unlock();
 
 	if (prev_root)
-		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
+		kvm_tdp_mmu_put_root(kvm, prev_root);
 
 	return next_root;
 }
@@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * recent root. (Unless keeping a live reference is desirable.)
  *
  * If shared is set, this function is operating under the MMU lock in read
- * mode. In the unlikely event that this thread must free a root, the lock
- * will be temporarily dropped and reacquired in write mode.
+ * mode.
  */
 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
 	     _root;								\
-	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
+	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
 		} else
@@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
 
 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)			\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);		\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, false);			\
 	     _root;								\
-	     _root = tdp_mmu_next_root(_kvm, _root, _shared, false))		\
+	     _root = tdp_mmu_next_root(_kvm, _root, false))			\
 		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {		\
 		} else
 
@@ -891,7 +899,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 		 * the root must be reachable by mmu_notifiers while it's being
 		 * zapped
 		 */
-		kvm_tdp_mmu_put_root(kvm, root, true);
+		kvm_tdp_mmu_put_root(kvm, root);
 	}
 
 	read_unlock(&kvm->mmu_lock);
@@ -1500,7 +1508,7 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
 		if (r) {
-			kvm_tdp_mmu_put_root(kvm, root, shared);
+			kvm_tdp_mmu_put_root(kvm, root);
 			break;
 		}
 	}

arch/x86/kvm/mmu/tdp_mmu.h

Lines changed: 1 addition & 2 deletions

@@ -17,8 +17,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-			  bool shared);
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
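
The caller-visible effect is just the dropped flag: every call site keeps its existing locking and loses the boolean. A hedged before/after sketch, with the lock/unlock lines shown only as illustrative context (the true/false values come from the call sites in the diffs above):

	/* Before: callers had to report their lock mode. */
	read_lock(&kvm->mmu_lock);
	kvm_tdp_mmu_put_root(kvm, root, true);	/* e.g. kvm_tdp_mmu_zap_invalidated_roots() */
	read_unlock(&kvm->mmu_lock);

	write_lock(&kvm->mmu_lock);
	kvm_tdp_mmu_put_root(kvm, root, false);	/* e.g. mmu_free_root_page() */
	write_unlock(&kvm->mmu_lock);

	/* After: one signature for both modes; lockdep still flags unlocked callers. */
	kvm_tdp_mmu_put_root(kvm, root);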
