Skip to content

Commit 484c22d

Browse files
committed
Merge tag 'kvmarm-fixes-5.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 5.18, take #2 - Take care of faults occurring between the PARange and IPA range by injecting an exception - Fix S2 faults taken from a host EL0 in protected mode - Work around Oops caused by a PMU access from a 32bit guest when PMU has been created. This is a temporary bodge until we fix it for good.
2 parents e852be8 + 85ea6b1 commit 484c22d

File tree

5 files changed

+79
-10
lines changed

5 files changed

+79
-10
lines changed

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
4040
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
4141
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
4242
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
43+
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
4344

4445
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
4546

arch/arm64/kvm/hyp/nvhe/host.S

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -198,15 +198,15 @@ SYM_CODE_START(__kvm_hyp_host_vector)
198198
invalid_host_el2_vect // FIQ EL2h
199199
invalid_host_el2_vect // Error EL2h
200200

201-
host_el1_sync_vect // Synchronous 64-bit EL1
202-
invalid_host_el1_vect // IRQ 64-bit EL1
203-
invalid_host_el1_vect // FIQ 64-bit EL1
204-
invalid_host_el1_vect // Error 64-bit EL1
205-
206-
invalid_host_el1_vect // Synchronous 32-bit EL1
207-
invalid_host_el1_vect // IRQ 32-bit EL1
208-
invalid_host_el1_vect // FIQ 32-bit EL1
209-
invalid_host_el1_vect // Error 32-bit EL1
201+
host_el1_sync_vect // Synchronous 64-bit EL1/EL0
202+
invalid_host_el1_vect // IRQ 64-bit EL1/EL0
203+
invalid_host_el1_vect // FIQ 64-bit EL1/EL0
204+
invalid_host_el1_vect // Error 64-bit EL1/EL0
205+
206+
host_el1_sync_vect // Synchronous 32-bit EL1/EL0
207+
invalid_host_el1_vect // IRQ 32-bit EL1/EL0
208+
invalid_host_el1_vect // FIQ 32-bit EL1/EL0
209+
invalid_host_el1_vect // Error 32-bit EL1/EL0
210210
SYM_CODE_END(__kvm_hyp_host_vector)
211211

212212
/*

arch/arm64/kvm/inject_fault.c

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,34 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
145145
inject_abt64(vcpu, true, addr);
146146
}
147147

148+
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
149+
{
150+
unsigned long addr, esr;
151+
152+
addr = kvm_vcpu_get_fault_ipa(vcpu);
153+
addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
154+
155+
if (kvm_vcpu_trap_is_iabt(vcpu))
156+
kvm_inject_pabt(vcpu, addr);
157+
else
158+
kvm_inject_dabt(vcpu, addr);
159+
160+
/*
161+
* If AArch64 or LPAE, set FSC to 0 to indicate an Address
162+
* Size Fault at level 0, as if exceeding PARange.
163+
*
164+
* Non-LPAE guests will only get the external abort, as there
165+
* is no way to to describe the ASF.
166+
*/
167+
if (vcpu_el1_is_32bit(vcpu) &&
168+
!(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
169+
return;
170+
171+
esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
172+
esr &= ~GENMASK_ULL(5, 0);
173+
vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
174+
}
175+
148176
/**
149177
* kvm_inject_undefined - inject an undefined instruction into the guest
150178
* @vcpu: The vCPU in which to inject the exception

arch/arm64/kvm/mmu.c

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1337,6 +1337,25 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
13371337
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
13381338
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
13391339

1340+
if (fault_status == FSC_FAULT) {
1341+
/* Beyond sanitised PARange (which is the IPA limit) */
1342+
if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
1343+
kvm_inject_size_fault(vcpu);
1344+
return 1;
1345+
}
1346+
1347+
/* Falls between the IPA range and the PARange? */
1348+
if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
1349+
fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
1350+
1351+
if (is_iabt)
1352+
kvm_inject_pabt(vcpu, fault_ipa);
1353+
else
1354+
kvm_inject_dabt(vcpu, fault_ipa);
1355+
return 1;
1356+
}
1357+
}
1358+
13401359
/* Synchronous External Abort? */
13411360
if (kvm_vcpu_abt_issea(vcpu)) {
13421361
/*

arch/arm64/kvm/pmu-emul.c

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -177,6 +177,9 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
177177
struct kvm_pmu *pmu = &vcpu->arch.pmu;
178178
struct kvm_pmc *pmc = &pmu->pmc[select_idx];
179179

180+
if (!kvm_vcpu_has_pmu(vcpu))
181+
return 0;
182+
180183
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
181184

182185
if (kvm_pmu_pmc_is_chained(pmc) &&
@@ -198,6 +201,9 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
198201
{
199202
u64 reg;
200203

204+
if (!kvm_vcpu_has_pmu(vcpu))
205+
return;
206+
201207
reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
202208
? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
203209
__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
@@ -322,6 +328,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
322328
struct kvm_pmu *pmu = &vcpu->arch.pmu;
323329
struct kvm_pmc *pmc;
324330

331+
if (!kvm_vcpu_has_pmu(vcpu))
332+
return;
333+
325334
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
326335
return;
327336

@@ -357,7 +366,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
357366
struct kvm_pmu *pmu = &vcpu->arch.pmu;
358367
struct kvm_pmc *pmc;
359368

360-
if (!val)
369+
if (!kvm_vcpu_has_pmu(vcpu) || !val)
361370
return;
362371

363372
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -527,6 +536,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
527536
struct kvm_pmu *pmu = &vcpu->arch.pmu;
528537
int i;
529538

539+
if (!kvm_vcpu_has_pmu(vcpu))
540+
return;
541+
530542
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
531543
return;
532544

@@ -576,6 +588,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
576588
{
577589
int i;
578590

591+
if (!kvm_vcpu_has_pmu(vcpu))
592+
return;
593+
579594
if (val & ARMV8_PMU_PMCR_E) {
580595
kvm_pmu_enable_counter_mask(vcpu,
581596
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
@@ -739,6 +754,9 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
739754
{
740755
u64 reg, mask;
741756

757+
if (!kvm_vcpu_has_pmu(vcpu))
758+
return;
759+
742760
mask = ARMV8_PMU_EVTYPE_MASK;
743761
mask &= ~ARMV8_PMU_EVTYPE_EVENT;
744762
mask |= kvm_pmu_event_mask(vcpu->kvm);
@@ -827,6 +845,9 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
827845
u64 val, mask = 0;
828846
int base, i, nr_events;
829847

848+
if (!kvm_vcpu_has_pmu(vcpu))
849+
return 0;
850+
830851
if (!pmceid1) {
831852
val = read_sysreg(pmceid0_el0);
832853
base = 0;

0 commit comments

Comments
 (0)