Commit 1f66f12

Authored and committed by: Marc Zyngier
Merge branch kvm-arm64/6.6/misc into kvmarm-master/next
* kvm-arm64/6.6/misc:
  : .
  : Misc KVM/arm64 updates for 6.6:
  :
  : - Don't unnecessarily align non-stack allocations in the EL2 VA space
  :
  : - Drop HCR_VIRT_EXCP_MASK, which was never used...
  :
  : - Don't use smp_processor_id() in kvm_arch_vcpu_load(),
  :   but the cpu parameter instead
  :
  : - Drop redundant call to kvm_set_pfn_accessed() in user_mem_abort()
  :
  : - Remove prototypes without implementations
  : .
  KVM: arm64: Remove size-order align in the nVHE hyp private VA range
  KVM: arm64: Remove unused declarations
  KVM: arm64: Remove redundant kvm_set_pfn_accessed() from user_mem_abort()
  KVM: arm64: Drop HCR_VIRT_EXCP_MASK
  KVM: arm64: Use the known cpu id instead of smp_processor_id()

Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents 50a40ff + f156a7d commit 1f66f12
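
For context on the first item in the merge description: before this series, every EL2 private-VA allocation was aligned to the order of its size, i.e. ALIGN(base, PAGE_SIZE << get_order(size)). The sketch below is a hypothetical userspace illustration (the PAGE_SHIFT value, the 3-page request and the get_order() stand-in are assumptions, not kernel code) of how that rounds a 3-page request up to a 4-page alignment boundary, which is the VA waste the series removes for non-stack allocations.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Minimal stand-in for the kernel's get_order(): the smallest order
 * such that (PAGE_SIZE << order) covers the requested size. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE;

	/* Order-based alignment rounds a 3-page request to a 4-page
	 * boundary, so up to 3 pages of private VA may be skipped
	 * before the allocation even starts. */
	printf("alignment = %lu pages for a %lu-page request\n",
	       (PAGE_SIZE << get_order(size)) >> PAGE_SHIFT,
	       size >> PAGE_SHIFT);
	return 0;
}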

File tree: 9 files changed, +139/-96 lines


arch/arm64/include/asm/kvm_arm.h

Lines changed: 0 additions & 1 deletion
@@ -98,7 +98,6 @@
 			 HCR_BSU_IS | HCR_FB | HCR_TACR | \
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
-#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 6 deletions
@@ -974,8 +974,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
 #endif /* __KVM_NVHE_HYPERVISOR__ */
 
-void force_vm_exit(const cpumask_t *mask);
-
 int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
@@ -1057,8 +1055,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
 	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
 }
 
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
-
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
@@ -1130,8 +1126,6 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm)
 	return false;
 }
 
-void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
-
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 0 deletions
@@ -168,6 +168,7 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
 			   void __iomem **haddr);
 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 			     void **haddr);
+int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
 void __init free_hyp_pgds(void);
 
 void stage2_unmap_vm(struct kvm *kvm);

arch/arm64/kvm/arm.c

Lines changed: 2 additions & 26 deletions
@@ -463,7 +463,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu_ptrauth_disable(vcpu);
 	kvm_arch_vcpu_load_debug_state_flags(vcpu);
 
-	if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
+	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
 		vcpu_set_on_unsupported_cpu(vcpu);
 }
 
@@ -2282,30 +2282,8 @@ static int __init init_hyp_mode(void)
 	for_each_possible_cpu(cpu) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
-		unsigned long hyp_addr;
 
-		/*
-		 * Allocate a contiguous HYP private VA range for the stack
-		 * and guard page. The allocation is also aligned based on
-		 * the order of its size.
-		 */
-		err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
-		if (err) {
-			kvm_err("Cannot allocate hyp stack guard page\n");
-			goto out_err;
-		}
-
-		/*
-		 * Since the stack grows downwards, map the stack to the page
-		 * at the higher address and leave the lower guard page
-		 * unbacked.
-		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
-		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
-		 */
-		err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
-					    __pa(stack_page), PAGE_HYP);
+		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
 			goto out_err;
@@ -2318,8 +2296,6 @@ static int __init init_hyp_mode(void)
 		 * has been mapped in the flexible private VA space.
 		 */
 		params->stack_pa = __pa(stack_page);
-
-		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
 	}
 
 	for_each_possible_cpu(cpu) {

arch/arm64/kvm/hyp/include/nvhe/mm.h

Lines changed: 1 addition & 0 deletions
@@ -26,6 +26,7 @@ int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot
 int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
 				  enum kvm_pgtable_prot prot,
 				  unsigned long *haddr);
+int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr);
 int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
 
 #endif /* __KVM_HYP_MM_H */

arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 66 additions & 17 deletions
@@ -44,6 +44,27 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
 	return err;
 }
 
+static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
+{
+	unsigned long cur;
+
+	hyp_assert_lock_held(&pkvm_pgd_lock);
+
+	if (!start || start < __io_map_base)
+		return -EINVAL;
+
+	/* The allocated size is always a multiple of PAGE_SIZE */
+	cur = start + PAGE_ALIGN(size);
+
+	/* Are we overflowing on the vmemmap ? */
+	if (cur > __hyp_vmemmap)
+		return -ENOMEM;
+
+	__io_map_base = cur;
+
+	return 0;
+}
+
 /**
  * pkvm_alloc_private_va_range - Allocates a private VA range.
  * @size:	The size of the VA range to reserve.
@@ -56,27 +77,16 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
  */
 int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
 {
-	unsigned long base, addr;
-	int ret = 0;
+	unsigned long addr;
+	int ret;
 
 	hyp_spin_lock(&pkvm_pgd_lock);
-
-	/* Align the allocation based on the order of its size */
-	addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));
-
-	/* The allocated size is always a multiple of PAGE_SIZE */
-	base = addr + PAGE_ALIGN(size);
-
-	/* Are we overflowing on the vmemmap ? */
-	if (!addr || base > __hyp_vmemmap)
-		ret = -ENOMEM;
-	else {
-		__io_map_base = base;
-		*haddr = addr;
-	}
-
+	addr = __io_map_base;
+	ret = __pkvm_alloc_private_va_range(addr, size);
 	hyp_spin_unlock(&pkvm_pgd_lock);
 
+	*haddr = addr;
+
 	return ret;
 }
 
@@ -340,6 +350,45 @@ int hyp_create_idmap(u32 hyp_va_bits)
 	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
 }
 
+int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
+{
+	unsigned long addr, prev_base;
+	size_t size;
+	int ret;
+
+	hyp_spin_lock(&pkvm_pgd_lock);
+
+	prev_base = __io_map_base;
+	/*
+	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * an alignment of our allocation on the order of the size.
+	 */
+	size = PAGE_SIZE * 2;
+	addr = ALIGN(__io_map_base, size);
+
+	ret = __pkvm_alloc_private_va_range(addr, size);
+	if (!ret) {
+		/*
+		 * Since the stack grows downwards, map the stack to the page
+		 * at the higher address and leave the lower guard page
+		 * unbacked.
+		 *
+		 * Any valid stack address now has the PAGE_SHIFT bit as 1
+		 * and addresses corresponding to the guard page have the
+		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+		 */
+		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
+					  PAGE_SIZE, phys, PAGE_HYP);
+		if (ret)
+			__io_map_base = prev_base;
+	}
+	hyp_spin_unlock(&pkvm_pgd_lock);
+
+	*haddr = addr + size;
+
+	return ret;
+}
+
 static void *admit_host_page(void *arg)
 {
 	struct kvm_hyp_memcache *host_mc = arg;
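
The comment block in pkvm_create_stack() above carries the key invariant: with the stack page mapped at the top of a 2 * PAGE_SIZE aligned range and the lower guard page left unbacked, the PAGE_SHIFT bit alone distinguishes a stack address from a guard-page address. A minimal sketch of that check follows, assuming a PAGE_SHIFT of 12 and a hypothetical helper name; it is an illustration, not the hypervisor's actual overflow handler.

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/*
 * Layout of one per-CPU hyp stack allocation, aligned to 2 * PAGE_SIZE:
 *
 *   [base, base + PAGE_SIZE)                 guard page, unbacked
 *   [base + PAGE_SIZE, base + 2 * PAGE_SIZE) stack page, mapped
 *
 * Every address on the stack page has BIT(PAGE_SHIFT) set and every
 * address on the guard page has it clear, so overflow detection is a
 * single bit test.
 */
static bool hyp_sp_hit_guard_page(uintptr_t sp)
{
	return !(sp & PAGE_SIZE);	/* PAGE_SIZE == BIT(PAGE_SHIFT) */
}

This is also why pkvm_create_stack() and create_hyp_stack() keep the size-order alignment (ALIGN to 2 * PAGE_SIZE) that the generic private-VA allocators drop in this series: only the stack mapping depends on it.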

arch/arm64/kvm/hyp/nvhe/setup.c

Lines changed: 1 addition & 26 deletions
@@ -113,41 +113,16 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 
 	for (i = 0; i < hyp_nr_cpus; i++) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
-		unsigned long hyp_addr;
 
 		start = (void *)kern_hyp_va(per_cpu_base[i]);
 		end = start + PAGE_ALIGN(hyp_percpu_size);
 		ret = pkvm_create_mappings(start, end, PAGE_HYP);
 		if (ret)
 			return ret;
 
-		/*
-		 * Allocate a contiguous HYP private VA range for the stack
-		 * and guard page. The allocation is also aligned based on
-		 * the order of its size.
-		 */
-		ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
+		ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
 		if (ret)
 			return ret;
-
-		/*
-		 * Since the stack grows downwards, map the stack to the page
-		 * at the higher address and leave the lower guard page
-		 * unbacked.
-		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
-		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
-		 */
-		hyp_spin_lock(&pkvm_pgd_lock);
-		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
-					  PAGE_SIZE, params->stack_pa, PAGE_HYP);
-		hyp_spin_unlock(&pkvm_pgd_lock);
-		if (ret)
-			return ret;
-
-		/* Update stack_hyp_va to end of the stack's private VA range */
-		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
 	}
 
 	/*

arch/arm64/kvm/mmu.c

Lines changed: 68 additions & 18 deletions
@@ -600,6 +600,25 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
 	return 0;
 }
 
+static int __hyp_alloc_private_va_range(unsigned long base)
+{
+	lockdep_assert_held(&kvm_hyp_pgd_mutex);
+
+	if (!PAGE_ALIGNED(base))
+		return -EINVAL;
+
+	/*
+	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
+	 * allocating the new area, as it would indicate we've
+	 * overflowed the idmap/IO address range.
+	 */
+	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
+		return -ENOMEM;
+
+	io_map_base = base;
+
+	return 0;
+}
 
 /**
  * hyp_alloc_private_va_range - Allocates a private VA range.
@@ -620,26 +639,16 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
 
 	/*
 	 * This assumes that we have enough space below the idmap
-	 * page to allocate our VAs. If not, the check below will
-	 * kick. A potential alternative would be to detect that
-	 * overflow and switch to an allocation above the idmap.
+	 * page to allocate our VAs. If not, the check in
+	 * __hyp_alloc_private_va_range() will kick. A potential
+	 * alternative would be to detect that overflow and switch
+	 * to an allocation above the idmap.
 	 *
 	 * The allocated size is always a multiple of PAGE_SIZE.
 	 */
-	base = io_map_base - PAGE_ALIGN(size);
-
-	/* Align the allocation based on the order of its size */
-	base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size));
-
-	/*
-	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
-	 * allocating the new area, as it would indicate we've
-	 * overflowed the idmap/IO address range.
-	 */
-	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
-		ret = -ENOMEM;
-	else
-		*haddr = io_map_base = base;
+	size = PAGE_ALIGN(size);
+	base = io_map_base - size;
+	ret = __hyp_alloc_private_va_range(base);
 
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 
@@ -676,6 +685,48 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
 	return ret;
 }
 
+int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
+{
+	unsigned long base;
+	size_t size;
+	int ret;
+
+	mutex_lock(&kvm_hyp_pgd_mutex);
+	/*
+	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * an alignment of our allocation on the order of the size.
+	 */
+	size = PAGE_SIZE * 2;
+	base = ALIGN_DOWN(io_map_base - size, size);
+
+	ret = __hyp_alloc_private_va_range(base);
+
+	mutex_unlock(&kvm_hyp_pgd_mutex);
+
+	if (ret) {
+		kvm_err("Cannot allocate hyp stack guard page\n");
+		return ret;
+	}
+
+	/*
+	 * Since the stack grows downwards, map the stack to the page
+	 * at the higher address and leave the lower guard page
+	 * unbacked.
+	 *
+	 * Any valid stack address now has the PAGE_SHIFT bit as 1
+	 * and addresses corresponding to the guard page have the
+	 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
	 */
+	ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
+				    PAGE_HYP);
+	if (ret)
+		kvm_err("Cannot map hyp stack\n");
+
+	*haddr = base + size;
+
+	return ret;
+}
+
 /**
  * create_hyp_io_mappings - Map IO into both kernel and HYP
  * @phys_addr:	The physical start address which gets mapped
@@ -1549,7 +1600,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 out_unlock:
 	read_unlock(&kvm->mmu_lock);
-	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 	return ret != -EAGAIN ? ret : 0;
 }
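
With the order-based alignment gone, hyp_alloc_private_va_range() above is a plain downward bump allocator: it subtracts the page-aligned size from io_map_base and lets __hyp_alloc_private_va_range() reject the result if BIT(VA_BITS - 1) flipped, i.e. if the allocation wandered out of the idmap/IO half of the hypervisor VA space. A standalone sketch of that wrap check follows; the VA_BITS value and helper name are assumed for illustration only.

#include <stdbool.h>
#include <stdint.h>

#define VA_BITS		48
#define BIT(n)		(1UL << (n))

/*
 * old_top is the current io_map_base, new_base the proposed (lower)
 * base after subtracting the allocation size. If the two differ in
 * bit (VA_BITS - 1), the downward allocation crossed out of the
 * idmap/IO window and must be refused with -ENOMEM.
 */
static bool hyp_va_alloc_overflowed(uint64_t old_top, uint64_t new_base)
{
	return (old_top ^ new_base) & BIT(VA_BITS - 1);
}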

arch/arm64/kvm/vgic/vgic.h

Lines changed: 0 additions & 2 deletions
@@ -199,7 +199,6 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
-void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
 int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
 			 int offset, u32 *val);
@@ -233,7 +232,6 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
-void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_enable(struct kvm_vcpu *vcpu);
