Skip to content

Commit 3390b3c

Browse files
Quentin Perret authored and Marc Zyngier committed
KVM: arm64: Move hyp state to hyp_vmemmap
Tracking the hypervisor's ownership state into struct hyp_page has several benefits, including allowing far more efficient lookups (no page-table walk needed) and de-correlating the state from the presence of a mapping. This will later allow mapping pages into EL2 stage-1 less proactively, which is generally a good thing for security. And in the future this will help with tracking the state of pages mapped into the hypervisor's private range without requiring an alias into the 'linear map' range. Reviewed-by: Marc Zyngier <maz@kernel.org> Signed-off-by: Quentin Perret <qperret@google.com> Link: https://lore.kernel.org/r/20250416152648.2982950-6-qperret@google.com Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent ba5b2e5 commit 3390b3c

File tree

3 files changed

+53
-33
lines changed

3 files changed

+53
-33
lines changed

arch/arm64/kvm/hyp/include/nvhe/memory.h

Lines changed: 23 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -16,22 +16,22 @@
1616
* accessible by another component;
1717
* 10: The page is accessible but not owned by the component;
1818
* The storage of this state depends on the component: either in the
19-
* hyp_vmemmap for the host state or in PTE software bits for the hypervisor
20-
* and guests.
19+
* hyp_vmemmap for the host and hyp states or in PTE software bits for guests.
2120
*/
2221
enum pkvm_page_state {
2322
PKVM_PAGE_OWNED = 0ULL,
2423
PKVM_PAGE_SHARED_OWNED = BIT(0),
2524
PKVM_PAGE_SHARED_BORROWED = BIT(1),
2625

2726
/*
28-
* 'Meta-states' are not stored directly in PTE SW bits for hyp and
29-
* guest states, but inferred from the context (e.g. invalid PTE
30-
* entries). For the host, meta-states are stored directly in the
27+
* 'Meta-states' are not stored directly in PTE SW bits for guest
28+
* states, but inferred from the context (e.g. invalid PTE entries).
29+
* For the host and hyp, meta-states are stored directly in the
3130
* struct hyp_page.
3231
*/
3332
PKVM_NOPAGE = BIT(0) | BIT(1),
3433
};
34+
#define PKVM_PAGE_STATE_MASK (BIT(0) | BIT(1))
3535

3636
#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
3737
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
@@ -52,7 +52,14 @@ struct hyp_page {
5252
u8 order;
5353

5454
/* Host state. Guarded by the host stage-2 lock. */
55-
unsigned __host_state : 8;
55+
unsigned __host_state : 4;
56+
57+
/*
58+
* Complement of the hyp state. Guarded by the hyp stage-1 lock. We use
59+
* the complement so that the initial 0 in __hyp_state_comp (due to the
60+
* entire vmemmap starting off zeroed) encodes PKVM_NOPAGE.
61+
*/
62+
unsigned __hyp_state_comp : 4;
5663

5764
u32 host_share_guest_count;
5865
};
@@ -99,6 +106,16 @@ static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
99106
hyp_phys_to_page(phys)->__host_state = state;
100107
}
101108

109+
static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
110+
{
111+
return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
112+
}
113+
114+
static inline void set_hyp_state(phys_addr_t phys, enum pkvm_page_state state)
115+
{
116+
hyp_phys_to_page(phys)->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
117+
}
118+
102119
/*
103120
* Refcounting for 'struct hyp_page'.
104121
* hyp_pool::lock must be held if atomic access to the refcount is required.

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 25 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -649,24 +649,24 @@ static int __host_set_page_state_range(u64 addr, u64 size,
649649
return 0;
650650
}
651651

652-
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
652+
static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
653653
{
654-
if (!kvm_pte_valid(pte))
655-
return PKVM_NOPAGE;
654+
phys_addr_t end = phys + size;
656655

657-
return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
656+
for (; phys < end; phys += PAGE_SIZE)
657+
set_hyp_state(phys, state);
658658
}
659659

660-
static int __hyp_check_page_state_range(u64 addr, u64 size,
661-
enum pkvm_page_state state)
660+
static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
662661
{
663-
struct check_walk_data d = {
664-
.desired = state,
665-
.get_page_state = hyp_get_page_state,
666-
};
662+
phys_addr_t end = phys + size;
663+
664+
for (; phys < end; phys += PAGE_SIZE) {
665+
if (get_hyp_state(phys) != state)
666+
return -EPERM;
667+
}
667668

668-
hyp_assert_lock_held(&pkvm_pgd_lock);
669-
return check_page_state_range(&pkvm_pgtable, addr, size, &d);
669+
return 0;
670670
}
671671

672672
static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
@@ -694,7 +694,6 @@ int __pkvm_host_share_hyp(u64 pfn)
694694
{
695695
u64 phys = hyp_pfn_to_phys(pfn);
696696
void *virt = __hyp_va(phys);
697-
enum kvm_pgtable_prot prot;
698697
u64 size = PAGE_SIZE;
699698
int ret;
700699

@@ -705,13 +704,13 @@ int __pkvm_host_share_hyp(u64 pfn)
705704
if (ret)
706705
goto unlock;
707706
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
708-
ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
707+
ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
709708
if (ret)
710709
goto unlock;
711710
}
712711

713-
prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
714-
WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
712+
__hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
713+
WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
715714
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
716715

717716
unlock:
@@ -734,14 +733,15 @@ int __pkvm_host_unshare_hyp(u64 pfn)
734733
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
735734
if (ret)
736735
goto unlock;
737-
ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
736+
ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
738737
if (ret)
739738
goto unlock;
740739
if (hyp_page_count((void *)virt)) {
741740
ret = -EBUSY;
742741
goto unlock;
743742
}
744743

744+
__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
745745
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
746746
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
747747

@@ -757,7 +757,6 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
757757
u64 phys = hyp_pfn_to_phys(pfn);
758758
u64 size = PAGE_SIZE * nr_pages;
759759
void *virt = __hyp_va(phys);
760-
enum kvm_pgtable_prot prot;
761760
int ret;
762761

763762
host_lock_component();
@@ -767,13 +766,13 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
767766
if (ret)
768767
goto unlock;
769768
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
770-
ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
769+
ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
771770
if (ret)
772771
goto unlock;
773772
}
774773

775-
prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
776-
WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
774+
__hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
775+
WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
777776
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
778777

779778
unlock:
@@ -793,7 +792,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
793792
host_lock_component();
794793
hyp_lock_component();
795794

796-
ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
795+
ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
797796
if (ret)
798797
goto unlock;
799798
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
@@ -802,6 +801,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
802801
goto unlock;
803802
}
804803

804+
__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
805805
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
806806
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
807807

@@ -816,19 +816,18 @@ int hyp_pin_shared_mem(void *from, void *to)
816816
{
817817
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
818818
u64 end = PAGE_ALIGN((u64)to);
819+
u64 phys = __hyp_pa(start);
819820
u64 size = end - start;
820821
int ret;
821822

822823
host_lock_component();
823824
hyp_lock_component();
824825

825-
ret = __host_check_page_state_range(__hyp_pa(start), size,
826-
PKVM_PAGE_SHARED_OWNED);
826+
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
827827
if (ret)
828828
goto unlock;
829829

830-
ret = __hyp_check_page_state_range(start, size,
831-
PKVM_PAGE_SHARED_BORROWED);
830+
ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
832831
if (ret)
833832
goto unlock;
834833

arch/arm64/kvm/hyp/nvhe/setup.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -194,16 +194,20 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
194194

195195
/*
196196
* Adjust the host stage-2 mappings to match the ownership attributes
197-
* configured in the hypervisor stage-1.
197+
* configured in the hypervisor stage-1, and make sure to propagate them
198+
* to the hyp_vmemmap state.
198199
*/
199200
state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
200201
switch (state) {
201202
case PKVM_PAGE_OWNED:
203+
set_hyp_state(phys, PKVM_PAGE_OWNED);
202204
return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
203205
case PKVM_PAGE_SHARED_OWNED:
206+
set_hyp_state(phys, PKVM_PAGE_SHARED_OWNED);
204207
set_host_state(phys, PKVM_PAGE_SHARED_BORROWED);
205208
break;
206209
case PKVM_PAGE_SHARED_BORROWED:
210+
set_hyp_state(phys, PKVM_PAGE_SHARED_BORROWED);
207211
set_host_state(phys, PKVM_PAGE_SHARED_OWNED);
208212
break;
209213
default:

0 commit comments

Comments
 (0)