Skip to content

Commit 48d8488

Browse files
Quentin Perret authored and Marc Zyngier committed
KVM: arm64: Defer EL2 stage-1 mapping on share
We currently blindly map into EL2 stage-1 *any* page passed to the __pkvm_host_share_hyp() HVC. This is less than ideal from a security perspective as it makes exploitation of potential hypervisor gadgets easier than it should be. But interestingly, pKVM should never need to access SHARED_BORROWED pages that it hasn't previously pinned, so there is no need to map the page before that. Reviewed-by: Marc Zyngier <maz@kernel.org> Signed-off-by: Quentin Perret <qperret@google.com> Link: https://lore.kernel.org/r/20250416152648.2982950-7-qperret@google.com Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 3390b3c commit 48d8488

File tree

1 file changed

+16
-7
lines changed

1 file changed

+16
-7
lines changed

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -693,7 +693,6 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
693693
int __pkvm_host_share_hyp(u64 pfn)
694694
{
695695
u64 phys = hyp_pfn_to_phys(pfn);
696-
void *virt = __hyp_va(phys);
697696
u64 size = PAGE_SIZE;
698697
int ret;
699698

@@ -710,7 +709,6 @@ int __pkvm_host_share_hyp(u64 pfn)
710709
}
711710

712711
__hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
713-
WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
714712
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
715713

716714
unlock:
@@ -742,7 +740,6 @@ int __pkvm_host_unshare_hyp(u64 pfn)
742740
}
743741

744742
__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
745-
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
746743
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
747744

748745
unlock:
@@ -818,6 +815,7 @@ int hyp_pin_shared_mem(void *from, void *to)
818815
u64 end = PAGE_ALIGN((u64)to);
819816
u64 phys = __hyp_pa(start);
820817
u64 size = end - start;
818+
struct hyp_page *p;
821819
int ret;
822820

823821
host_lock_component();
@@ -831,8 +829,14 @@ int hyp_pin_shared_mem(void *from, void *to)
831829
if (ret)
832830
goto unlock;
833831

834-
for (cur = start; cur < end; cur += PAGE_SIZE)
835-
hyp_page_ref_inc(hyp_virt_to_page(cur));
832+
for (cur = start; cur < end; cur += PAGE_SIZE) {
833+
p = hyp_virt_to_page(cur);
834+
hyp_page_ref_inc(p);
835+
if (p->refcount == 1)
836+
WARN_ON(pkvm_create_mappings_locked((void *)cur,
837+
(void *)cur + PAGE_SIZE,
838+
PAGE_HYP));
839+
}
836840

837841
unlock:
838842
hyp_unlock_component();
@@ -845,12 +849,17 @@ void hyp_unpin_shared_mem(void *from, void *to)
845849
{
846850
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
847851
u64 end = PAGE_ALIGN((u64)to);
852+
struct hyp_page *p;
848853

849854
host_lock_component();
850855
hyp_lock_component();
851856

852-
for (cur = start; cur < end; cur += PAGE_SIZE)
853-
hyp_page_ref_dec(hyp_virt_to_page(cur));
857+
for (cur = start; cur < end; cur += PAGE_SIZE) {
858+
p = hyp_virt_to_page(cur);
859+
if (p->refcount == 1)
860+
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
861+
hyp_page_ref_dec(p);
862+
}
854863

855864
hyp_unlock_component();
856865
host_unlock_component();

0 commit comments

Comments (0)