|
5 | 5 | */
|
6 | 6 |
|
7 | 7 | #include <linux/init.h>
|
| 8 | +#include <linux/interval_tree_generic.h> |
8 | 9 | #include <linux/kmemleak.h>
|
9 | 10 | #include <linux/kvm_host.h>
|
10 | 11 | #include <asm/kvm_mmu.h>
|
@@ -275,80 +276,67 @@ static int __init finalize_pkvm(void)
|
275 | 276 | }
|
276 | 277 | device_initcall_sync(finalize_pkvm);
|
277 | 278 |
|
278 |
| -static int cmp_mappings(struct rb_node *node, const struct rb_node *parent) |
| 279 | +static u64 __pkvm_mapping_start(struct pkvm_mapping *m) |
279 | 280 | {
|
280 |
| - struct pkvm_mapping *a = rb_entry(node, struct pkvm_mapping, node); |
281 |
| - struct pkvm_mapping *b = rb_entry(parent, struct pkvm_mapping, node); |
282 |
| - |
283 |
| - if (a->gfn < b->gfn) |
284 |
| - return -1; |
285 |
| - if (a->gfn > b->gfn) |
286 |
| - return 1; |
287 |
| - return 0; |
| 281 | + return m->gfn * PAGE_SIZE; |
288 | 282 | }
|
289 | 283 |
|
290 |
| -static struct rb_node *find_first_mapping_node(struct rb_root *root, u64 gfn) |
| 284 | +static u64 __pkvm_mapping_end(struct pkvm_mapping *m) |
291 | 285 | {
|
292 |
| - struct rb_node *node = root->rb_node, *prev = NULL; |
293 |
| - struct pkvm_mapping *mapping; |
294 |
| - |
295 |
| - while (node) { |
296 |
| - mapping = rb_entry(node, struct pkvm_mapping, node); |
297 |
| - if (mapping->gfn == gfn) |
298 |
| - return node; |
299 |
| - prev = node; |
300 |
| - node = (gfn < mapping->gfn) ? node->rb_left : node->rb_right; |
301 |
| - } |
302 |
| - |
303 |
| - return prev; |
| 286 | + return (m->gfn + 1) * PAGE_SIZE - 1; |
304 | 287 | }
|
305 | 288 |
|
| 289 | +INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last, |
| 290 | + __pkvm_mapping_start, __pkvm_mapping_end, static, |
| 291 | + pkvm_mapping); |
| 292 | + |
/*
 * Iterate over all mappings whose page intersects [__start, __end) (byte
 * addresses; the iterator takes an inclusive last, hence __end - 1).
 *
 * __tmp is advanced to the next interval-tree node *before* entering the body
 * of the loop, so the body may free (remove + kfree) __map inline.
 */
#define for_each_mapping_in_range_safe(__pgt, __start, __end, __map)				\
	for (struct pkvm_mapping *__tmp = pkvm_mapping_iter_first(&(__pgt)->pkvm_mappings,	\
								  __start, __end - 1);		\
	     __tmp && ({									\
			__map = __tmp;								\
			__tmp = pkvm_mapping_iter_next(__map, __start, __end - 1);		\
			true;									\
		       });									\
	    )
324 | 306 |
|
325 | 307 | int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
|
326 | 308 | struct kvm_pgtable_mm_ops *mm_ops)
|
327 | 309 | {
|
328 |
| - pgt->pkvm_mappings = RB_ROOT; |
| 310 | + pgt->pkvm_mappings = RB_ROOT_CACHED; |
329 | 311 | pgt->mmu = mmu;
|
330 | 312 |
|
331 | 313 | return 0;
|
332 | 314 | }
|
333 | 315 |
|
334 |
| -void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) |
| 316 | +static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 end) |
335 | 317 | {
|
336 | 318 | struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
|
337 | 319 | pkvm_handle_t handle = kvm->arch.pkvm.handle;
|
338 | 320 | struct pkvm_mapping *mapping;
|
339 |
| - struct rb_node *node; |
| 321 | + int ret; |
340 | 322 |
|
341 | 323 | if (!handle)
|
342 |
| - return; |
| 324 | + return 0; |
343 | 325 |
|
344 |
| - node = rb_first(&pgt->pkvm_mappings); |
345 |
| - while (node) { |
346 |
| - mapping = rb_entry(node, struct pkvm_mapping, node); |
347 |
| - kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn); |
348 |
| - node = rb_next(node); |
349 |
| - rb_erase(&mapping->node, &pgt->pkvm_mappings); |
| 326 | + for_each_mapping_in_range_safe(pgt, start, end, mapping) { |
| 327 | + ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, 1); |
| 328 | + if (WARN_ON(ret)) |
| 329 | + return ret; |
| 330 | + pkvm_mapping_remove(mapping, &pgt->pkvm_mappings); |
350 | 331 | kfree(mapping);
|
351 | 332 | }
|
| 333 | + |
| 334 | + return 0; |
| 335 | +} |
| 336 | + |
/*
 * Destroy the table by unmapping the whole IPA space.  The return value is
 * intentionally ignored: destruction is best-effort and cannot fail back to
 * the caller.
 */
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
	__pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL));
}
|
353 | 341 |
|
354 | 342 | int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
|
@@ -376,28 +364,16 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
|
376 | 364 | swap(mapping, cache->mapping);
|
377 | 365 | mapping->gfn = gfn;
|
378 | 366 | mapping->pfn = pfn;
|
379 |
| - WARN_ON(rb_find_add(&mapping->node, &pgt->pkvm_mappings, cmp_mappings)); |
| 367 | + pkvm_mapping_insert(mapping, &pgt->pkvm_mappings); |
380 | 368 |
|
381 | 369 | return ret;
|
382 | 370 | }
|
383 | 371 |
|
384 | 372 | int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
|
385 | 373 | {
|
386 |
| - struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu); |
387 |
| - pkvm_handle_t handle = kvm->arch.pkvm.handle; |
388 |
| - struct pkvm_mapping *mapping; |
389 |
| - int ret = 0; |
390 |
| - |
391 |
| - lockdep_assert_held_write(&kvm->mmu_lock); |
392 |
| - for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) { |
393 |
| - ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, 1); |
394 |
| - if (WARN_ON(ret)) |
395 |
| - break; |
396 |
| - rb_erase(&mapping->node, &pgt->pkvm_mappings); |
397 |
| - kfree(mapping); |
398 |
| - } |
| 374 | + lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(pgt->mmu)->mmu_lock); |
399 | 375 |
|
400 |
| - return ret; |
| 376 | + return __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size); |
401 | 377 | }
|
402 | 378 |
|
403 | 379 | int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
|
|
0 commit comments