Skip to content

Commit df26b77

Browse files
committed
Merge branch kvm-arm64/stage2-vhe-load into kvmarm/next
* kvm-arm64/stage2-vhe-load:
  Setup stage-2 MMU from vcpu_load() for VHE

  Unlike nVHE, there is no need to switch the stage-2 MMU around on guest
  entry/exit in VHE mode as the host is running at EL2. Despite this KVM
  reloads the stage-2 on every guest entry, which is needless.

  This series moves the setup of the stage-2 MMU context to vcpu_load()
  when running in VHE mode. This is likely to be a win across the board,
  but also allows us to remove an ISB on the guest entry path for systems
  with one of the speculative AT errata.

  KVM: arm64: Move VTCR_EL2 into struct s2_mmu
  KVM: arm64: Load the stage-2 MMU context in kvm_vcpu_load_vhe()
  KVM: arm64: Rename helpers for VHE vCPU load/put
  KVM: arm64: Reload stage-2 for VMID change on VHE
  KVM: arm64: Restore the stage-2 context in VHE's __tlb_switch_to_host()
  KVM: arm64: Don't zero VTTBR in __tlb_switch_to_host()

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents 51e6079 + fe49fd9 commit df26b77

File tree

15 files changed

+90
-62
lines changed

15 files changed

+90
-62
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,16 @@ struct kvm_s2_mmu {
158158
phys_addr_t pgd_phys;
159159
struct kvm_pgtable *pgt;
160160

161+
/*
162+
* VTCR value used on the host. For a non-NV guest (or a NV
163+
* guest that runs in a context where its own S2 doesn't
164+
* apply), its T0SZ value reflects that of the IPA size.
165+
*
166+
* For a shadow S2 MMU, T0SZ reflects the PARange exposed to
167+
* the guest.
168+
*/
169+
u64 vtcr;
170+
161171
/* The last vcpu id that ran on each physical CPU */
162172
int __percpu *last_vcpu_ran;
163173

@@ -205,9 +215,6 @@ struct kvm_protected_vm {
205215
struct kvm_arch {
206216
struct kvm_s2_mmu mmu;
207217

208-
/* VTCR_EL2 value for this VM */
209-
u64 vtcr;
210-
211218
/* Interrupt controller */
212219
struct vgic_dist vgic;
213220

@@ -1020,7 +1027,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
10201027
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
10211028
int __init kvm_arm_vmid_alloc_init(void);
10221029
void __init kvm_arm_vmid_alloc_free(void);
1023-
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
1030+
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
10241031
void kvm_arm_vmid_clear_active(void);
10251032

10261033
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
@@ -1104,8 +1111,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
11041111
}
11051112
#endif
11061113

1107-
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
1108-
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
1114+
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
1115+
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
11091116

11101117
int __init kvm_set_ipa_limit(void);
11111118

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,8 @@ void __timer_disable_traps(struct kvm_vcpu *vcpu);
9393
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
9494
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
9595
#else
96+
void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu);
97+
void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu);
9698
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
9799
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
98100
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
@@ -111,11 +113,6 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
111113
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
112114
void __sve_restore_state(void *sve_pffr, u32 *fpsr);
113115

114-
#ifndef __KVM_NVHE_HYPERVISOR__
115-
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
116-
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
117-
#endif
118-
119116
u64 __guest_enter(struct kvm_vcpu *vcpu);
120117

121118
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -150,9 +150,9 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
150150
*/
151151
#define KVM_PHYS_SHIFT (40)
152152

153-
#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr)
154-
#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm))
155-
#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL))
153+
#define kvm_phys_shift(mmu) VTCR_EL2_IPA((mmu)->vtcr)
154+
#define kvm_phys_size(mmu) (_AC(1, ULL) << kvm_phys_shift(mmu))
155+
#define kvm_phys_mask(mmu) (kvm_phys_size(mmu) - _AC(1, ULL))
156156

157157
#include <asm/kvm_pgtable.h>
158158
#include <asm/stage2_pgtable.h>
@@ -324,7 +324,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
324324
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
325325
struct kvm_arch *arch)
326326
{
327-
write_sysreg(arch->vtcr, vtcr_el2);
327+
write_sysreg(mmu->vtcr, vtcr_el2);
328328
write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
329329

330330
/*

arch/arm64/include/asm/stage2_pgtable.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,13 +21,13 @@
2121
* (IPA_SHIFT - 4).
2222
*/
2323
#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
24-
#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
24+
#define kvm_stage2_levels(mmu) VTCR_EL2_LVLS((mmu)->vtcr)
2525

2626
/*
2727
 * kvm_mmu_cache_min_pages() is the number of pages required to install
2828
* a stage-2 translation. We pre-allocate the entry level page table at
2929
* the VM creation.
3030
*/
31-
#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1)
31+
#define kvm_mmu_cache_min_pages(mmu) (kvm_stage2_levels(mmu) - 1)
3232

3333
#endif /* __ARM64_S2_PGTABLE_H_ */

arch/arm64/kvm/arm.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -447,7 +447,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
447447
kvm_vgic_load(vcpu);
448448
kvm_timer_vcpu_load(vcpu);
449449
if (has_vhe())
450-
kvm_vcpu_load_sysregs_vhe(vcpu);
450+
kvm_vcpu_load_vhe(vcpu);
451451
kvm_arch_vcpu_load_fp(vcpu);
452452
kvm_vcpu_pmu_restore_guest(vcpu);
453453
if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
@@ -471,7 +471,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
471471
kvm_arch_vcpu_put_debug_state_flags(vcpu);
472472
kvm_arch_vcpu_put_fp(vcpu);
473473
if (has_vhe())
474-
kvm_vcpu_put_sysregs_vhe(vcpu);
474+
kvm_vcpu_put_vhe(vcpu);
475475
kvm_timer_vcpu_put(vcpu);
476476
kvm_vgic_put(vcpu);
477477
kvm_vcpu_pmu_restore_host(vcpu);
@@ -949,7 +949,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
949949
* making a thread's VMID inactive. So we need to call
950950
 * kvm_arm_vmid_update() in non-preemptible context.
951951
*/
952-
kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
952+
if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
953+
has_vhe())
954+
__load_stage2(vcpu->arch.hw_mmu,
955+
vcpu->arch.hw_mmu->arch);
953956

954957
kvm_pmu_flush_hwstate(vcpu);
955958

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -129,8 +129,8 @@ static void prepare_host_vtcr(void)
129129
parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
130130
phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
131131

132-
host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
133-
id_aa64mmfr1_el1_sys_val, phys_shift);
132+
host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
133+
id_aa64mmfr1_el1_sys_val, phys_shift);
134134
}
135135

136136
static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
@@ -235,7 +235,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
235235
unsigned long nr_pages;
236236
int ret;
237237

238-
nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
238+
nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
239239
ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
240240
if (ret)
241241
return ret;
@@ -295,7 +295,7 @@ int __pkvm_prot_finalize(void)
295295
return -EPERM;
296296

297297
params->vttbr = kvm_get_vttbr(mmu);
298-
params->vtcr = host_mmu.arch.vtcr;
298+
params->vtcr = mmu->vtcr;
299299
params->hcr_el2 |= HCR_VM;
300300

301301
/*

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -303,7 +303,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
303303
{
304304
hyp_vm->host_kvm = host_kvm;
305305
hyp_vm->kvm.created_vcpus = nr_vcpus;
306-
hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
306+
hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
307307
}
308308

309309
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -483,7 +483,7 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
483483
}
484484

485485
vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
486-
pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);
486+
pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);
487487

488488
ret = -ENOMEM;
489489

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1511,7 +1511,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
15111511
kvm_pgtable_force_pte_cb_t force_pte_cb)
15121512
{
15131513
size_t pgd_sz;
1514-
u64 vtcr = mmu->arch->vtcr;
1514+
u64 vtcr = mmu->vtcr;
15151515
u32 ia_bits = VTCR_EL2_IPA(vtcr);
15161516
u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
15171517
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -93,12 +93,12 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
9393
NOKPROBE_SYMBOL(__deactivate_traps);
9494

9595
/*
96-
* Disable IRQs in {activate,deactivate}_traps_vhe_{load,put}() to
96+
* Disable IRQs in __vcpu_{load,put}_{activate,deactivate}_traps() to
9797
* prevent a race condition between context switching of PMUSERENR_EL0
9898
* in __{activate,deactivate}_traps_common() and IPIs that attempts to
9999
* update PMUSERENR_EL0. See also kvm_set_pmuserenr().
100100
*/
101-
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
101+
static void __vcpu_load_activate_traps(struct kvm_vcpu *vcpu)
102102
{
103103
unsigned long flags;
104104

@@ -107,7 +107,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
107107
local_irq_restore(flags);
108108
}
109109

110-
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
110+
static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
111111
{
112112
unsigned long flags;
113113

@@ -116,6 +116,19 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
116116
local_irq_restore(flags);
117117
}
118118

119+
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
120+
{
121+
__vcpu_load_switch_sysregs(vcpu);
122+
__vcpu_load_activate_traps(vcpu);
123+
__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
124+
}
125+
126+
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
127+
{
128+
__vcpu_put_deactivate_traps(vcpu);
129+
__vcpu_put_switch_sysregs(vcpu);
130+
}
131+
119132
static const exit_handler_fn hyp_exit_handlers[] = {
120133
[0 ... ESR_ELx_EC_MAX] = NULL,
121134
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
@@ -170,17 +183,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
170183
sysreg_save_host_state_vhe(host_ctxt);
171184

172185
/*
173-
* ARM erratum 1165522 requires us to configure both stage 1 and
174-
* stage 2 translation for the guest context before we clear
175-
* HCR_EL2.TGE.
176-
*
177-
* We have already configured the guest's stage 1 translation in
178-
* kvm_vcpu_load_sysregs_vhe above. We must now call
179-
* __load_stage2 before __activate_traps, because
180-
* __load_stage2 configures stage 2 translation, and
181-
* __activate_traps clear HCR_EL2.TGE (among other things).
186+
* Note that ARM erratum 1165522 requires us to configure both stage 1
187+
* and stage 2 translation for the guest context before we clear
188+
* HCR_EL2.TGE. The stage 1 and stage 2 guest context has already been
189+
* loaded on the CPU in kvm_vcpu_load_vhe().
182190
*/
183-
__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
184191
__activate_traps(vcpu);
185192

186193
__kvm_adjust_pc(vcpu);

arch/arm64/kvm/hyp/vhe/sysreg-sr.c

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
5252
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
5353

5454
/**
55-
* kvm_vcpu_load_sysregs_vhe - Load guest system registers to the physical CPU
55+
* __vcpu_load_switch_sysregs - Load guest system registers to the physical CPU
5656
*
5757
* @vcpu: The VCPU pointer
5858
*
@@ -62,7 +62,7 @@ NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
6262
* and loading system register state early avoids having to load them on
6363
* every entry to the VM.
6464
*/
65-
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
65+
void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
6666
{
6767
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
6868
struct kvm_cpu_context *host_ctxt;
@@ -92,12 +92,10 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
9292
__sysreg_restore_el1_state(guest_ctxt);
9393

9494
vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
95-
96-
activate_traps_vhe_load(vcpu);
9795
}
9896

9997
/**
100-
* kvm_vcpu_put_sysregs_vhe - Restore host system registers to the physical CPU
98+
 * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
10199
*
102100
* @vcpu: The VCPU pointer
103101
*
@@ -107,13 +105,12 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
107105
* and deferring saving system register state until we're no longer running the
108106
* VCPU avoids having to save them on every exit from the VM.
109107
*/
110-
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
108+
void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
111109
{
112110
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
113111
struct kvm_cpu_context *host_ctxt;
114112

115113
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
116-
deactivate_traps_vhe_put(vcpu);
117114

118115
__sysreg_save_el1_state(guest_ctxt);
119116
__sysreg_save_user_state(guest_ctxt);

0 commit comments

Comments
 (0)