Commit b38c977

Authored by Quentin Perret, committed by Marc Zyngier
KVM: arm64: Convert pkvm_mappings to interval tree
In preparation for supporting stage-2 huge mappings for np-guest, let's
convert pgt.pkvm_mappings to an interval tree. No functional change
intended.

Suggested-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Link: https://lore.kernel.org/r/20250521124834.1070650-8-vdonnefort@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent c4d99a8 commit b38c977
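
For readers unfamiliar with <linux/interval_tree_generic.h>: the INTERVAL_TREE_DEFINE() macro used in this patch expands into a static set of helpers named after its last argument. The declarations below are a rough sketch of the API the patch relies on, paraphrased from the generic template; they are illustrative, not the literal macro expansion:

/*
 * Sketch only: what INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64,
 * __subtree_last, __pkvm_mapping_start, __pkvm_mapping_end, static,
 * pkvm_mapping) generates, keyed on the inclusive [start, last] range
 * returned by the two callbacks.
 */
static void pkvm_mapping_insert(struct pkvm_mapping *node,
                                struct rb_root_cached *root);
static void pkvm_mapping_remove(struct pkvm_mapping *node,
                                struct rb_root_cached *root);
/* First mapping overlapping [start, last], or NULL if none. */
static struct pkvm_mapping *pkvm_mapping_iter_first(struct rb_root_cached *root,
                                                    u64 start, u64 last);
/* Next mapping after @node still overlapping [start, last], or NULL. */
static struct pkvm_mapping *pkvm_mapping_iter_next(struct pkvm_mapping *node,
                                                   u64 start, u64 last);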

File tree: 3 files changed, +37 −60 lines

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 1 addition & 1 deletion
@@ -413,7 +413,7 @@ static inline bool kvm_pgtable_walk_lock_held(void)
  */
 struct kvm_pgtable {
         union {
-                struct rb_root                  pkvm_mappings;
+                struct rb_root_cached           pkvm_mappings;
                 struct {
                         u32                     ia_bits;
                         s8                      start_level;
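
The rb_root to rb_root_cached switch is required by the interval tree template, which keeps the leftmost node cached so a range walk can find its starting point cheaply. A minimal sketch of the difference, using the types and initializers from <linux/rbtree.h> (the surrounding usage is illustrative):

#include <linux/rbtree.h>

struct rb_root root = RB_ROOT;                  /* plain red-black tree root */
struct rb_root_cached croot = RB_ROOT_CACHED;   /* root plus cached leftmost node */

/* O(1) on the cached variant vs. an O(log n) descent via rb_first(&root). */
struct rb_node *first = rb_first_cached(&croot);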

arch/arm64/include/asm/kvm_pkvm.h

Lines changed: 1 addition & 0 deletions
@@ -173,6 +173,7 @@ struct pkvm_mapping {
         struct rb_node node;
         u64 gfn;
         u64 pfn;
+        u64 __subtree_last; /* Internal member for interval tree */
 };

 int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
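
The new __subtree_last member is the augmentation slot the interval tree template maintains: every node caches the maximum interval endpoint found in its subtree, which lets a query prune whole branches. A sketch of the invariant follows; compute_subtree_last is a hypothetical name for what the generated callbacks compute internally and is not part of this patch:

/* Hypothetical helper illustrating the invariant kept on insert/remove. */
static u64 compute_subtree_last(struct pkvm_mapping *m)
{
        u64 max = __pkvm_mapping_end(m), last;

        /* Fold in the cached maxima of both children, if present. */
        if (m->node.rb_left) {
                last = rb_entry(m->node.rb_left, struct pkvm_mapping,
                                node)->__subtree_last;
                if (last > max)
                        max = last;
        }
        if (m->node.rb_right) {
                last = rb_entry(m->node.rb_right, struct pkvm_mapping,
                                node)->__subtree_last;
                if (last > max)
                        max = last;
        }
        /* A query for [start, last] skips any subtree whose cached max < start. */
        return max;
}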

arch/arm64/kvm/pkvm.c

Lines changed: 35 additions & 59 deletions
@@ -5,6 +5,7 @@
  */

 #include <linux/init.h>
+#include <linux/interval_tree_generic.h>
 #include <linux/kmemleak.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_mmu.h>
@@ -275,80 +276,67 @@ static int __init finalize_pkvm(void)
 }
 device_initcall_sync(finalize_pkvm);

-static int cmp_mappings(struct rb_node *node, const struct rb_node *parent)
+static u64 __pkvm_mapping_start(struct pkvm_mapping *m)
 {
-        struct pkvm_mapping *a = rb_entry(node, struct pkvm_mapping, node);
-        struct pkvm_mapping *b = rb_entry(parent, struct pkvm_mapping, node);
-
-        if (a->gfn < b->gfn)
-                return -1;
-        if (a->gfn > b->gfn)
-                return 1;
-        return 0;
+        return m->gfn * PAGE_SIZE;
 }

-static struct rb_node *find_first_mapping_node(struct rb_root *root, u64 gfn)
+static u64 __pkvm_mapping_end(struct pkvm_mapping *m)
 {
-        struct rb_node *node = root->rb_node, *prev = NULL;
-        struct pkvm_mapping *mapping;
-
-        while (node) {
-                mapping = rb_entry(node, struct pkvm_mapping, node);
-                if (mapping->gfn == gfn)
-                        return node;
-                prev = node;
-                node = (gfn < mapping->gfn) ? node->rb_left : node->rb_right;
-        }
-
-        return prev;
+        return (m->gfn + 1) * PAGE_SIZE - 1;
 }

+INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last,
+                     __pkvm_mapping_start, __pkvm_mapping_end, static,
+                     pkvm_mapping);
+
 /*
- * __tmp is updated to rb_next(__tmp) *before* entering the body of the loop to allow freeing
- * of __map inline.
+ * __tmp is updated to iter_first(pkvm_mappings) *before* entering the body of the loop to allow
+ * freeing of __map inline.
  */
 #define for_each_mapping_in_range_safe(__pgt, __start, __end, __map)                   \
-        for (struct rb_node *__tmp = find_first_mapping_node(&(__pgt)->pkvm_mappings,  \
-                                                             ((__start) >> PAGE_SHIFT)); \
+        for (struct pkvm_mapping *__tmp = pkvm_mapping_iter_first(&(__pgt)->pkvm_mappings, \
+                                                                  __start, __end - 1); \
              __tmp && ({                                                               \
-                     __map = rb_entry(__tmp, struct pkvm_mapping, node);               \
-                     __tmp = rb_next(__tmp);                                           \
+                     __map = __tmp;                                                    \
+                     __tmp = pkvm_mapping_iter_next(__map, __start, __end - 1);        \
                      true;                                                             \
               });                                                                      \
-            )                                                                          \
-                if (__map->gfn < ((__start) >> PAGE_SHIFT))                            \
-                        continue;                                                      \
-                else if (__map->gfn >= ((__end) >> PAGE_SHIFT))                        \
-                        break;                                                         \
-                else
+            )

 int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                              struct kvm_pgtable_mm_ops *mm_ops)
 {
-        pgt->pkvm_mappings     = RB_ROOT;
+        pgt->pkvm_mappings     = RB_ROOT_CACHED;
         pgt->mmu               = mmu;

         return 0;
 }

-void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 end)
 {
         struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
         pkvm_handle_t handle = kvm->arch.pkvm.handle;
         struct pkvm_mapping *mapping;
-        struct rb_node *node;
+        int ret;

         if (!handle)
-                return;
+                return 0;

-        node = rb_first(&pgt->pkvm_mappings);
-        while (node) {
-                mapping = rb_entry(node, struct pkvm_mapping, node);
-                kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);
-                node = rb_next(node);
-                rb_erase(&mapping->node, &pgt->pkvm_mappings);
+        for_each_mapping_in_range_safe(pgt, start, end, mapping) {
+                ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, 1);
+                if (WARN_ON(ret))
+                        return ret;
+                pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
                 kfree(mapping);
         }
+
+        return 0;
+}
+
+void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
+{
+        __pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
 }

 int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -376,28 +364,16 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
         swap(mapping, cache->mapping);
         mapping->gfn = gfn;
         mapping->pfn = pfn;
-        WARN_ON(rb_find_add(&mapping->node, &pgt->pkvm_mappings, cmp_mappings));
+        pkvm_mapping_insert(mapping, &pgt->pkvm_mappings);

         return ret;
 }

 int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
 {
-        struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
-        pkvm_handle_t handle = kvm->arch.pkvm.handle;
-        struct pkvm_mapping *mapping;
-        int ret = 0;
-
-        lockdep_assert_held_write(&kvm->mmu_lock);
-        for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
-                ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, 1);
-                if (WARN_ON(ret))
-                        break;
-                rb_erase(&mapping->node, &pgt->pkvm_mappings);
-                kfree(mapping);
-        }
+        lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(pgt->mmu)->mmu_lock);

-        return ret;
+        return __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
 }

 int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
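
With the conversion in place, range walks over np-guest mappings reduce to the generated iterator pair. The sketch below shows how a hypothetical caller (not part of this patch; locking asserts elided) might count mappings intersecting an IPA range; note that iter_first()/iter_next() take an inclusive last argument, hence the "- 1":

/* Hypothetical example: count mappings overlapping [addr, addr + size). */
static unsigned long count_pkvm_mappings(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
        struct pkvm_mapping *mapping;
        unsigned long nr = 0;

        for (mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);
             mapping;
             mapping = pkvm_mapping_iter_next(mapping, addr, addr + size - 1))
                nr++;

        return nr;
}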
