Skip to content

Commit db14091

Browse files
vdonnefort authored and Marc Zyngier committed
KVM: arm64: Stage-2 huge mappings for np-guests
Now np-guests hypercalls with range are supported, we can let the hypervisor to install block mappings whenever the Stage-1 allows it, that is when backed by either Hugetlbfs or THPs. The size of those block mappings is limited to PMD_SIZE. Signed-off-by: Vincent Donnefort <vdonnefort@google.com> Link: https://lore.kernel.org/r/20250521124834.1070650-10-vdonnefort@google.com Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 3669ddd commit db14091

File tree

3 files changed

+20
-11
lines changed

3 files changed

+20
-11
lines changed

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -166,12 +166,6 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
166166
return 0;
167167
}
168168

169-
static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
170-
enum kvm_pgtable_prot prot)
171-
{
172-
return true;
173-
}
174-
175169
static void *guest_s2_zalloc_pages_exact(size_t size)
176170
{
177171
void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));
@@ -278,8 +272,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
278272
};
279273

280274
guest_lock_component(vm);
281-
ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
282-
guest_stage2_force_pte_cb);
275+
ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);
283276
guest_unlock_component(vm);
284277
if (ret)
285278
return ret;
@@ -908,12 +901,24 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
908901

909902
static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
910903
{
904+
size_t block_size;
905+
911906
if (nr_pages == 1) {
912907
*size = PAGE_SIZE;
913908
return 0;
914909
}
915910

916-
return -EINVAL;
911+
/* We solely support second to last level huge mapping */
912+
block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1);
913+
914+
if (nr_pages != block_size >> PAGE_SHIFT)
915+
return -EINVAL;
916+
917+
if (!IS_ALIGNED(phys | ipa, block_size))
918+
return -EINVAL;
919+
920+
*size = block_size;
921+
return 0;
917922
}
918923

919924
int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,

arch/arm64/kvm/mmu.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1304,6 +1304,10 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
13041304
if (map_size == PAGE_SIZE)
13051305
return true;
13061306

1307+
/* pKVM only supports PMD_SIZE huge-mappings */
1308+
if (is_protected_kvm_enabled() && map_size != PMD_SIZE)
1309+
return false;
1310+
13071311
size = memslot->npages * PAGE_SIZE;
13081312

13091313
gpa_start = memslot->base_gfn << PAGE_SHIFT;
@@ -1537,7 +1541,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15371541
* logging_active is guaranteed to never be true for VM_PFNMAP
15381542
* memslots.
15391543
*/
1540-
if (logging_active || is_protected_kvm_enabled()) {
1544+
if (logging_active) {
15411545
force_pte = true;
15421546
vma_shift = PAGE_SHIFT;
15431547
} else {

arch/arm64/kvm/pkvm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -351,7 +351,7 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
351351
u64 pfn = phys >> PAGE_SHIFT;
352352
int ret;
353353

354-
if (size != PAGE_SIZE)
354+
if (size != PAGE_SIZE && size != PMD_SIZE)
355355
return -EINVAL;
356356

357357
lockdep_assert_held_write(&kvm->mmu_lock);

0 commit comments

Comments
 (0)