@@ -283,7 +283,7 @@ static u64 __pkvm_mapping_start(struct pkvm_mapping *m)
 
 static u64 __pkvm_mapping_end(struct pkvm_mapping *m)
 {
-	return (m->gfn + 1) * PAGE_SIZE - 1;
+	return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
 }
 
 INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last,
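The helpers above define the closed byte interval each mapping occupies in the interval tree; with nr_pages, the end now tracks the mapped size instead of assuming a single page. For orientation, a minimal sketch of the record these helpers operate on. Only the names node, gfn, pfn, nr_pages and __subtree_last are confirmed by this diff; the field types and order are an assumption:

struct pkvm_mapping {
	struct rb_node	node;		/* interval-tree linkage (see INTERVAL_TREE_DEFINE above) */
	u64		gfn;		/* first guest frame covered by the mapping */
	u64		pfn;		/* first host frame backing it */
	u64		nr_pages;	/* 1 for a page mapping, more for a block mapping */
	u64		__subtree_last;	/* cached subtree maximum used by the interval tree */
};

With that, a mapping spans [gfn * PAGE_SIZE, (gfn + nr_pages) * PAGE_SIZE - 1], which is exactly the interval __pkvm_mapping_start()/__pkvm_mapping_end() feed to the tree.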
@@ -324,7 +324,8 @@ static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 e
 		return 0;
 
 	for_each_mapping_in_range_safe(pgt, start, end, mapping) {
-		ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, 1);
+		ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn,
+					mapping->nr_pages);
 		if (WARN_ON(ret))
 			return ret;
 		pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
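Every range operation in this file funnels through for_each_mapping_in_range_safe(), whose definition sits outside these hunks. A plausible sketch, assuming only the pkvm_mapping_iter_first()/pkvm_mapping_iter_next() helpers that INTERVAL_TREE_DEFINE() generates for the pkvm_mapping prefix; this is not the in-tree definition:

/*
 * Sketch only: walk every pkvm_mapping overlapping [start, end) while letting
 * the body remove the current node, by fetching the successor before the body
 * runs -- which is what the unmap loop above relies on. The tree stores closed
 * intervals, hence the "end - 1".
 */
#define for_each_mapping_in_range_safe(__pgt, __start, __end, __map)			\
	for (struct pkvm_mapping *__next = pkvm_mapping_iter_first(&(__pgt)->pkvm_mappings, \
								   __start, __end - 1);	\
	     __next && ({ __map = __next;						\
			  __next = pkvm_mapping_iter_next(__map, __start, __end - 1);	\
			  true; });							\
	    )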
@@ -354,16 +355,32 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		return -EINVAL;
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
-	ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, 1, prot);
-	if (ret) {
-		/* Is the gfn already mapped due to a racing vCPU? */
-		if (ret == -EPERM)
+
+	/*
+	 * Calling stage2_map() on top of existing mappings is either happening because of a race
+	 * with another vCPU, or because we're changing between page and block mappings. As per
+	 * user_mem_abort(), same-size permission faults are handled in the relax_perms() path.
+	 */
+	mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);
+	if (mapping) {
+		if (size == (mapping->nr_pages * PAGE_SIZE))
 			return -EAGAIN;
+
+		/* Remove _any_ pkvm_mapping overlapping with the range, bigger or smaller. */
+		ret = __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
+		if (ret)
+			return ret;
+		mapping = NULL;
 	}
 
+	ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot);
+	if (WARN_ON(ret))
+		return ret;
+
 	swap(mapping, cache->mapping);
 	mapping->gfn = gfn;
 	mapping->pfn = pfn;
+	mapping->nr_pages = size / PAGE_SIZE;
 	pkvm_mapping_insert(mapping, &pgt->pkvm_mappings);
 
 	return ret;
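The net effect of the rewritten overlap handling: a same-size hit means another vCPU won the race (or it is a permission fault, which user_mem_abort() resolves via the relax_perms() path instead), so -EAGAIN lets the caller retry; a different size means a page/block transition, so the overlapping mappings are torn down and the whole range is re-shared in a single hypercall. A hedged illustration of the resulting arithmetic, assuming a 4K granule and that the -EINVAL check at the top of the hunk rejects everything but exact page- or block-sized ranges; the helper name is invented for the example:

/* Illustration only -- not a function in this file. */
static inline u64 stage2_map_nr_pages_example(u64 size)
{
	/*
	 * By the time the share path runs, size is either PAGE_SIZE or
	 * PMD_SIZE, so the page count passed to the hypervisor is:
	 *
	 *   size == PAGE_SIZE -> __pkvm_host_share_guest(pfn, gfn,   1, prot)
	 *   size == PMD_SIZE  -> __pkvm_host_share_guest(pfn, gfn, 512, prot)
	 *
	 * (512 = 2M block / 4K pages), and the same value is cached in
	 * mapping->nr_pages for later unmap/wrprotect/flush operations.
	 */
	return size / PAGE_SIZE;
}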
@@ -385,7 +402,8 @@ int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
 
 	lockdep_assert_held(&kvm->mmu_lock);
 	for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
-		ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn, 1);
+		ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn,
+					mapping->nr_pages);
 		if (WARN_ON(ret))
 			break;
 	}
@@ -400,7 +418,8 @@ int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
 
 	lockdep_assert_held(&kvm->mmu_lock);
 	for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
-		__clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), PAGE_SIZE);
+		__clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn),
+					  PAGE_SIZE * mapping->nr_pages);
 
 	return 0;
 }
@@ -415,7 +434,7 @@ bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64
 	lockdep_assert_held(&kvm->mmu_lock);
 	for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
 		young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn,
-					   1, mkold);
+					   mapping->nr_pages, mkold);
 
 	return young;
 }