Skip to content

Commit 123f42f

Browse files
committed
Merge branch kvm-arm64/pmu_pmcr_n into kvmarm/next
* kvm-arm64/pmu_pmcr_n:
  : User-defined PMC limit, courtesy Raghavendra Rao Ananta
  :
  : Certain VMMs may want to reserve some PMCs for host use while running a
  : KVM guest. This was a bit difficult before, as KVM advertised all
  : supported counters to the guest. Userspace can now limit the number of
  : advertised PMCs by writing to PMCR_EL0.N, as KVM's sysreg and PMU
  : emulation enforce the specified limit for handling guest accesses.
  KVM: selftests: aarch64: vPMU test for validating user accesses
  KVM: selftests: aarch64: vPMU register test for unimplemented counters
  KVM: selftests: aarch64: vPMU register test for implemented counters
  KVM: selftests: aarch64: Introduce vpmu_counter_access test
  tools: Import arm_pmuv3.h
  KVM: arm64: PMU: Allow userspace to limit PMCR_EL0.N for the guest
  KVM: arm64: Sanitize PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR} before first run
  KVM: arm64: Add {get,set}_user for PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR}
  KVM: arm64: PMU: Set PMCR_EL0.N for vCPU based on the associated PMU
  KVM: arm64: PMU: Add a helper to read a vCPU's PMCR_EL0
  KVM: arm64: Select default PMU in KVM_ARM_VCPU_INIT handler
  KVM: arm64: PMU: Introduce helpers to set the guest's PMU

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents 53ce49e + 62708be commit 123f42f

File tree

9 files changed

+1204
-56
lines changed

9 files changed

+1204
-56
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -290,6 +290,9 @@ struct kvm_arch {
290290

291291
cpumask_var_t supported_cpus;
292292

293+
/* PMCR_EL0.N value for the guest */
294+
u8 pmcr_n;
295+
293296
/* Hypercall features firmware registers' descriptor */
294297
struct kvm_smccc_features smccc_feat;
295298
struct maple_tree smccc_filter;

arch/arm64/kvm/arm.c

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -857,8 +857,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
857857
}
858858

859859
if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
860-
kvm_pmu_handle_pmcr(vcpu,
861-
__vcpu_sys_reg(vcpu, PMCR_EL0));
860+
kvm_vcpu_reload_pmu(vcpu);
862861

863862
if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
864863
kvm_vcpu_pmu_restore_guest(vcpu);
@@ -1319,6 +1318,21 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
13191318
KVM_VCPU_MAX_FEATURES);
13201319
}
13211320

1321+
static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
1322+
{
1323+
struct kvm *kvm = vcpu->kvm;
1324+
int ret = 0;
1325+
1326+
/*
1327+
* When the vCPU has a PMU, but no PMU is set for the guest
1328+
* yet, set the default one.
1329+
*/
1330+
if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
1331+
ret = kvm_arm_set_default_pmu(kvm);
1332+
1333+
return ret;
1334+
}
1335+
13221336
static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
13231337
const struct kvm_vcpu_init *init)
13241338
{
@@ -1334,6 +1348,10 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
13341348

13351349
bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
13361350

1351+
ret = kvm_setup_vcpu(vcpu);
1352+
if (ret)
1353+
goto out_unlock;
1354+
13371355
/* Now we know what it is, we can reset it. */
13381356
kvm_reset_vcpu(vcpu);
13391357

arch/arm64/kvm/pmu-emul.c

Lines changed: 79 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
8989

9090
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
9191
{
92-
u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
92+
u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
9393

9494
return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
9595
(pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
@@ -267,7 +267,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
267267

268268
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
269269
{
270-
u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
270+
u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
271271

272272
val &= ARMV8_PMU_PMCR_N_MASK;
273273
if (val == 0)
@@ -289,7 +289,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
289289
if (!kvm_vcpu_has_pmu(vcpu))
290290
return;
291291

292-
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
292+
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
293293
return;
294294

295295
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -341,7 +341,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
341341
{
342342
u64 reg = 0;
343343

344-
if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
344+
if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
345345
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
346346
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
347347
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
@@ -443,7 +443,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
443443
{
444444
int i;
445445

446-
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
446+
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
447447
return;
448448

449449
/* Weed out disabled counters */
@@ -586,7 +586,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
586586
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
587587
{
588588
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
589-
return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
589+
return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
590590
(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
591591
}
592592

@@ -735,10 +735,9 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
735735
* It is still necessary to get a valid cpu, though, to probe for the
736736
* default PMU instance as userspace is not required to specify a PMU
737737
* type. In order to uphold the preexisting behavior KVM selects the
738-
* PMU instance for the core where the first call to the
739-
* KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
740-
* would be a user with disdain of all things big.LITTLE that affines
741-
* the VMM to a particular cluster of cores.
738+
* PMU instance for the core during vcpu init. A dependent use
739+
* case would be a user with disdain of all things big.LITTLE that
740+
* affines the VMM to a particular cluster of cores.
742741
*
743742
* In any case, userspace should just do the sane thing and use the UAPI
744743
* to select a PMU type directly. But, be wary of the baggage being
@@ -804,6 +803,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
804803
return val & mask;
805804
}
806805

806+
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
807+
{
808+
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
809+
810+
kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
811+
812+
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
813+
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
814+
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
815+
}
816+
807817
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
808818
{
809819
if (!kvm_vcpu_has_pmu(vcpu))
@@ -892,6 +902,52 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
892902
return true;
893903
}
894904

905+
/**
906+
* kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
907+
* @kvm: The kvm pointer
908+
*/
909+
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
910+
{
911+
struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
912+
913+
/*
914+
* The arm_pmu->num_events considers the cycle counter as well.
915+
* Ignore that and return only the general-purpose counters.
916+
*/
917+
return arm_pmu->num_events - 1;
918+
}
919+
920+
static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
921+
{
922+
lockdep_assert_held(&kvm->arch.config_lock);
923+
924+
kvm->arch.arm_pmu = arm_pmu;
925+
kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
926+
}
927+
928+
/**
929+
* kvm_arm_set_default_pmu - No PMU set, get the default one.
930+
* @kvm: The kvm pointer
931+
*
932+
* The observant among you will notice that the supported_cpus
933+
* mask does not get updated for the default PMU even though it
934+
* is quite possible the selected instance supports only a
935+
* subset of cores in the system. This is intentional, and
936+
* upholds the preexisting behavior on heterogeneous systems
937+
* where vCPUs can be scheduled on any core but the guest
938+
* counters could stop working.
939+
*/
940+
int kvm_arm_set_default_pmu(struct kvm *kvm)
941+
{
942+
struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
943+
944+
if (!arm_pmu)
945+
return -ENODEV;
946+
947+
kvm_arm_set_pmu(kvm, arm_pmu);
948+
return 0;
949+
}
950+
895951
static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
896952
{
897953
struct kvm *kvm = vcpu->kvm;
@@ -911,7 +967,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
911967
break;
912968
}
913969

914-
kvm->arch.arm_pmu = arm_pmu;
970+
kvm_arm_set_pmu(kvm, arm_pmu);
915971
cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
916972
ret = 0;
917973
break;
@@ -934,23 +990,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
934990
if (vcpu->arch.pmu.created)
935991
return -EBUSY;
936992

937-
if (!kvm->arch.arm_pmu) {
938-
/*
939-
* No PMU set, get the default one.
940-
*
941-
* The observant among you will notice that the supported_cpus
942-
* mask does not get updated for the default PMU even though it
943-
* is quite possible the selected instance supports only a
944-
* subset of cores in the system. This is intentional, and
945-
* upholds the preexisting behavior on heterogeneous systems
946-
* where vCPUs can be scheduled on any core but the guest
947-
* counters could stop working.
948-
*/
949-
kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
950-
if (!kvm->arch.arm_pmu)
951-
return -ENODEV;
952-
}
953-
954993
switch (attr->attr) {
955994
case KVM_ARM_VCPU_PMU_V3_IRQ: {
956995
int __user *uaddr = (int __user *)(long)attr->addr;
@@ -1090,3 +1129,15 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
10901129
ID_AA64DFR0_EL1_PMUVer_V3P5);
10911130
return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
10921131
}
1132+
1133+
/**
1134+
* kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
1135+
* @vcpu: The vcpu pointer
1136+
*/
1137+
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
1138+
{
1139+
u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
1140+
~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
1141+
1142+
return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
1143+
}

0 commit comments

Comments
 (0)