Skip to content

Commit de86ef7

Browse files
yamahata authored and bonzini committed
KVM: x86/tdp_mmu: Introduce KVM MMU root types to specify page table type
Define an enum kvm_tdp_mmu_root_types to specify the KVM MMU root type [1] so that the iterator on the root page table can consistently filter the root page table type instead of only_valid. TDX KVM will operate on KVM page tables with specified types: shared page table, private page table, or both. Introduce an enum instead of bool only_valid so that we can easily enhance page table types applicable to shared, private, or both in addition to valid or not. Replace only_valid=false with KVM_ALL_ROOTS and only_valid=true with KVM_VALID_ROOTS. Use KVM_ALL_ROOTS and KVM_VALID_ROOTS, with KVM_ALL_ROOTS composed from KVM_VALID_ROOTS | KVM_INVALID_ROOTS, to avoid further code churn when direct vs mirror root concepts are introduced in future patches. Link: https://lore.kernel.org/kvm/ZivazWQw1oCU8VBC@google.com/ [1] Suggested-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com> Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com> Message-ID: <20240718211230.1492011-11-rick.p.edgecombe@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent e84b8e4 commit de86ef7

File tree

2 files changed

+30
-19
lines changed

2 files changed

+30
-19
lines changed

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 23 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -92,27 +92,31 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
9292
call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
9393
}
9494

95-
static bool tdp_mmu_root_match(struct kvm_mmu_page *root, bool only_valid)
95+
static bool tdp_mmu_root_match(struct kvm_mmu_page *root,
96+
enum kvm_tdp_mmu_root_types types)
9697
{
97-
if (only_valid && root->role.invalid)
98+
if (WARN_ON_ONCE(!(types & KVM_VALID_ROOTS)))
99+
return false;
100+
101+
if (root->role.invalid && !(types & KVM_INVALID_ROOTS))
98102
return false;
99103

100104
return true;
101105
}
102106

103107
/*
104108
* Returns the next root after @prev_root (or the first root if @prev_root is
105-
* NULL). A reference to the returned root is acquired, and the reference to
106-
* @prev_root is released (the caller obviously must hold a reference to
107-
* @prev_root if it's non-NULL).
109+
* NULL) that matches with @types. A reference to the returned root is
110+
* acquired, and the reference to @prev_root is released (the caller obviously
111+
* must hold a reference to @prev_root if it's non-NULL).
108112
*
109-
* If @only_valid is true, invalid roots are skipped.
113+
* Roots that doesn't match with @types are skipped.
110114
*
111115
* Returns NULL if the end of tdp_mmu_roots was reached.
112116
*/
113117
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
114118
struct kvm_mmu_page *prev_root,
115-
bool only_valid)
119+
enum kvm_tdp_mmu_root_types types)
116120
{
117121
struct kvm_mmu_page *next_root;
118122

@@ -133,7 +137,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
133137
typeof(*next_root), link);
134138

135139
while (next_root) {
136-
if (tdp_mmu_root_match(next_root, only_valid) &&
140+
if (tdp_mmu_root_match(next_root, types) &&
137141
kvm_tdp_mmu_get_root(next_root))
138142
break;
139143

@@ -158,20 +162,20 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
158162
* If shared is set, this function is operating under the MMU lock in read
159163
* mode.
160164
*/
161-
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid) \
162-
for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
165+
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _types) \
166+
for (_root = tdp_mmu_next_root(_kvm, NULL, _types); \
163167
({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
164-
_root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
168+
_root = tdp_mmu_next_root(_kvm, _root, _types)) \
165169
if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) { \
166170
} else
167171

168172
#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
169-
__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
173+
__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, KVM_VALID_ROOTS)
170174

171175
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
172-
for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
176+
for (_root = tdp_mmu_next_root(_kvm, NULL, KVM_ALL_ROOTS); \
173177
({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
174-
_root = tdp_mmu_next_root(_kvm, _root, false))
178+
_root = tdp_mmu_next_root(_kvm, _root, KVM_ALL_ROOTS))
175179

176180
/*
177181
* Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
@@ -180,18 +184,18 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
180184
* Holding mmu_lock for write obviates the need for RCU protection as the list
181185
* is guaranteed to be stable.
182186
*/
183-
#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid) \
187+
#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _types) \
184188
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \
185189
if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \
186190
((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) || \
187-
!tdp_mmu_root_match((_root), (_only_valid)))) { \
191+
!tdp_mmu_root_match((_root), (_types)))) { \
188192
} else
189193

190194
#define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
191-
__for_each_tdp_mmu_root(_kvm, _root, _as_id, false)
195+
__for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_ALL_ROOTS)
192196

193197
#define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id) \
194-
__for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
198+
__for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_VALID_ROOTS)
195199

196200
static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
197201
{
@@ -1164,7 +1168,7 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
11641168
{
11651169
struct kvm_mmu_page *root;
11661170

1167-
__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
1171+
__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, KVM_ALL_ROOTS)
11681172
flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
11691173
range->may_block, flush);
11701174

arch/x86/kvm/mmu/tdp_mmu.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,13 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
1919

2020
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
2121

22+
enum kvm_tdp_mmu_root_types {
23+
KVM_INVALID_ROOTS = BIT(0),
24+
25+
KVM_VALID_ROOTS = BIT(1),
26+
KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,
27+
};
28+
2229
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
2330
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
2431
void kvm_tdp_mmu_zap_all(struct kvm *kvm);

0 commit comments

Comments
 (0)