Skip to content

Commit 49661a5

Browse files
committed
Merge tag 'kvmarm-fixes-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 6.4, take #3:
- Fix the reported address of a watchpoint forwarded to userspace
- Fix the freeing of the root of stage-2 page tables
- Stop creating spurious PMU events to perform detection of the default PMU and use the existing PMU list instead
2 parents 26f3149 + 40e54ca commit 49661a5

File tree

5 files changed

+35
-37
lines changed

5 files changed

+35
-37
lines changed

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 6 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -412,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
412412
return false;
413413
}
414414

415-
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
415+
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
416416
{
417417
if (!__populate_fault_info(vcpu))
418418
return true;
419419

420420
return false;
421421
}
422+
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
423+
__alias(kvm_hyp_handle_memory_fault);
424+
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
425+
__alias(kvm_hyp_handle_memory_fault);
422426

423427
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
424428
{
425-
if (!__populate_fault_info(vcpu))
429+
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
426430
return true;
427431

428432
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
186186
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
187187
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
188188
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
189+
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
189190
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
190191
};
191192

@@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
196197
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
197198
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
198199
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
200+
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
199201
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
200202
};
201203

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1332,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
13321332
};
13331333

13341334
WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
1335+
1336+
WARN_ON(mm_ops->page_count(pgtable) != 1);
1337+
mm_ops->put_page(pgtable);
13351338
}

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
110110
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
111111
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
112112
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
113+
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
113114
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
114115
};
115116

arch/arm64/kvm/pmu-emul.c

Lines changed: 23 additions & 35 deletions
Original file line number | Diff line number | Diff line change
@@ -694,45 +694,23 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
694694

695695
static struct arm_pmu *kvm_pmu_probe_armpmu(void)
696696
{
697-
struct perf_event_attr attr = { };
698-
struct perf_event *event;
699-
struct arm_pmu *pmu = NULL;
700-
701-
/*
702-
* Create a dummy event that only counts user cycles. As we'll never
703-
* leave this function with the event being live, it will never
704-
* count anything. But it allows us to probe some of the PMU
705-
* details. Yes, this is terrible.
706-
*/
707-
attr.type = PERF_TYPE_RAW;
708-
attr.size = sizeof(attr);
709-
attr.pinned = 1;
710-
attr.disabled = 0;
711-
attr.exclude_user = 0;
712-
attr.exclude_kernel = 1;
713-
attr.exclude_hv = 1;
714-
attr.exclude_host = 1;
715-
attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
716-
attr.sample_period = GENMASK(63, 0);
697+
struct arm_pmu *tmp, *pmu = NULL;
698+
struct arm_pmu_entry *entry;
699+
int cpu;
717700

718-
event = perf_event_create_kernel_counter(&attr, -1, current,
719-
kvm_pmu_perf_overflow, &attr);
701+
mutex_lock(&arm_pmus_lock);
720702

721-
if (IS_ERR(event)) {
722-
pr_err_once("kvm: pmu event creation failed %ld\n",
723-
PTR_ERR(event));
724-
return NULL;
725-
}
703+
cpu = smp_processor_id();
704+
list_for_each_entry(entry, &arm_pmus, entry) {
705+
tmp = entry->arm_pmu;
726706

727-
if (event->pmu) {
728-
pmu = to_arm_pmu(event->pmu);
729-
if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
730-
pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
731-
pmu = NULL;
707+
if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
708+
pmu = tmp;
709+
break;
710+
}
732711
}
733712

734-
perf_event_disable(event);
735-
perf_event_release_kernel(event);
713+
mutex_unlock(&arm_pmus_lock);
736714

737715
return pmu;
738716
}
@@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
912890
return -EBUSY;
913891

914892
if (!kvm->arch.arm_pmu) {
915-
/* No PMU set, get the default one */
893+
/*
894+
* No PMU set, get the default one.
895+
*
896+
* The observant among you will notice that the supported_cpus
897+
* mask does not get updated for the default PMU even though it
898+
* is quite possible the selected instance supports only a
899+
* subset of cores in the system. This is intentional, and
900+
* upholds the preexisting behavior on heterogeneous systems
901+
* where vCPUs can be scheduled on any core but the guest
902+
* counters could stop working.
903+
*/
916904
kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
917905
if (!kvm->arch.arm_pmu)
918906
return -ENODEV;

0 commit comments

Comments (0)