Commit 6577f1e (parent: fcdffe9)
KVM: x86/mmu: Allow passing '-1' for "all" as_id for TDP MMU iterators
Modify for_each_tdp_mmu_root() and __for_each_tdp_mmu_root_yield_safe()
to accept -1 for _as_id to mean "process all memslot address spaces".
That way, code that wants to process both SMM and !SMM doesn't need to
iterate over roots twice (and likely copy+paste code in the process).

Deliberately don't cast _as_id to an "int", just in case not casting
helps the compiler elide the "_as_id >= 0" check when being passed an
unsigned value, e.g. from a memslot.

No functional change intended.

Link: https://lore.kernel.org/r/20240111020048.844847-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
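As a rough sketch of what the '-1' convention buys callers (hypothetical
code, not part of this patch: process_root() is an invented helper, and
the kvm_arch_nr_memslot_as_ids() loop bound is assumed from the
surrounding KVM code):

	struct kvm_mmu_page *root;
	int i;

	/* Before: one pass per address space (SMM and !SMM on x86). */
	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++)
		for_each_tdp_mmu_root(kvm, root, i)
			process_root(kvm, root);

	/*
	 * After: _as_id == -1 skips the as_id filter entirely, so a
	 * single pass visits the roots of every address space.
	 */
	for_each_tdp_mmu_root(kvm, root, -1)
		process_root(kvm, root);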

1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -149,11 +149,11 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * If shared is set, this function is operating under the MMU lock in read
  * mode.
  */
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);	\
-	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
-	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))	\
-		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)	\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
+	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;		\
+	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
+		if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {	\
 		} else
 
 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
@@ -171,10 +171,10 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * Holding mmu_lock for write obviates the need for RCU protection as the list
  * is guaranteed to be stable.
  */
-#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
-	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
-		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&	\
-		    kvm_mmu_page_as_id(_root) != _as_id) {		\
+#define for_each_tdp_mmu_root(_kvm, _root, _as_id)				\
+	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)		\
+		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&		\
+		    _as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {	\
 		} else
 
 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
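On the changelog's signedness hedge: a memslot's as_id is an unsigned
field (u16), so a minimal standalone sketch (plain C, not kernel code;
skip_root() is invented for illustration) of why leaving _as_id uncast
can let the compiler drop the check:

	#include <stdint.h>

	/*
	 * With an unsigned argument, "as_id >= 0" is always true after
	 * integer promotion, so the compiler can elide it and keep only
	 * the inequality test.  Casting _as_id to "int" might defeat
	 * that, hence the macros leave the argument's type alone.
	 */
	static int skip_root(uint16_t as_id, int root_as_id)
	{
		return as_id >= 0 && root_as_id != as_id;
	}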
