@@ -89,7 +89,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
 {
-        u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
+        u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
 
         return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
                (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
@@ -267,7 +267,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+        u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
 
         val &= ARMV8_PMU_PMCR_N_MASK;
         if (val == 0)
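
For illustration only (this block is not part of the patch): the hunk above ends just before the mask is actually built. Below is a minimal standalone C sketch of the value kvm_pmu_valid_counter_mask() is expected to return, assuming the constants mirror the kernel's ARMV8_PMU_* definitions (PMCR_EL0.N in bits [15:11], cycle counter at index 31): one bit per implemented event counter plus the cycle-counter bit.

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the kernel definitions. */
#define ARMV8_PMU_PMCR_N_SHIFT  11
#define ARMV8_PMU_PMCR_N_MASK   0x1f
#define ARMV8_PMU_CYCLE_IDX     31

/* Sketch of the mask the remainder of kvm_pmu_valid_counter_mask() builds:
 * one bit per implemented event counter plus the cycle-counter bit. */
static uint64_t valid_counter_mask(uint64_t pmcr)
{
        uint64_t n = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;

        if (n == 0)
                return 1ULL << ARMV8_PMU_CYCLE_IDX;
        return ((1ULL << n) - 1) | (1ULL << ARMV8_PMU_CYCLE_IDX);
}

int main(void)
{
        /* A PMU with 6 event counters: bits 0-5 plus bit 31 -> 0x8000003f. */
        printf("0x%llx\n", (unsigned long long)
               valid_counter_mask(6ULL << ARMV8_PMU_PMCR_N_SHIFT));
        return 0;
}
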
@@ -289,7 +289,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
         if (!kvm_vcpu_has_pmu(vcpu))
                 return;
 
-        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+        if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
                 return;
 
         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -341,7 +341,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
         u64 reg = 0;
 
-        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+        if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
                 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                 reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                 reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
@@ -443,7 +443,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 {
         int i;
 
-        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+        if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
                 return;
 
         /* Weed out disabled counters */
@@ -586,7 +586,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 {
         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
-        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+        return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
                (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
 
@@ -735,10 +735,9 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
          * It is still necessary to get a valid cpu, though, to probe for the
          * default PMU instance as userspace is not required to specify a PMU
          * type. In order to uphold the preexisting behavior KVM selects the
-         * PMU instance for the core where the first call to the
-         * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
-         * would be a user with disdain of all things big.LITTLE that affines
-         * the VMM to a particular cluster of cores.
+         * PMU instance for the core during vcpu init. A dependent use
+         * case would be a user with disdain of all things big.LITTLE that
+         * affines the VMM to a particular cluster of cores.
          *
          * In any case, userspace should just do the sane thing and use the UAPI
          * to select a PMU type directly. But, be wary of the baggage being
@@ -804,6 +803,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
         return val & mask;
 }
 
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
+{
+        u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+        kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
+
+        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
+        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
+        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+}
+
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
         if (!kvm_vcpu_has_pmu(vcpu))
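
For illustration only (not part of the patch): kvm_vcpu_reload_pmu() re-applies PMCR and then clamps the overflow, interrupt-enable and counter-enable registers to the mask of counters valid for the current PMCR_EL0.N. A toy sketch of that clamping, using hypothetical values where only counters 0-1 plus the cycle counter remain valid:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical values: the valid mask covers counters 0-1 plus the
         * cycle counter (bit 31); PMCNTENSET_EL0 still carries stale bits
         * 2-5 left over from a configuration with a larger PMCR_EL0.N. */
        uint64_t mask = 0x3ULL | (1ULL << 31);
        uint64_t pmcntenset = 0x3fULL | (1ULL << 31);

        pmcntenset &= mask;     /* what the reload does for each register */
        printf("0x%llx\n", (unsigned long long)pmcntenset);    /* 0x80000003 */
        return 0;
}
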
@@ -892,6 +902,52 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
         return true;
 }
 
+/**
+ * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
+ * @kvm: The kvm pointer
+ */
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+        struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
+
+        /*
+         * The arm_pmu->num_events considers the cycle counter as well.
+         * Ignore that and return only the general-purpose counters.
+         */
+        return arm_pmu->num_events - 1;
+}
+
+static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
+{
+        lockdep_assert_held(&kvm->arch.config_lock);
+
+        kvm->arch.arm_pmu = arm_pmu;
+        kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
+}
+
+/**
+ * kvm_arm_set_default_pmu - No PMU set, get the default one.
+ * @kvm: The kvm pointer
+ *
+ * The observant among you will notice that the supported_cpus
+ * mask does not get updated for the default PMU even though it
+ * is quite possible the selected instance supports only a
+ * subset of cores in the system. This is intentional, and
+ * upholds the preexisting behavior on heterogeneous systems
+ * where vCPUs can be scheduled on any core but the guest
+ * counters could stop working.
+ */
+int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+        struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
+
+        if (!arm_pmu)
+                return -ENODEV;
+
+        kvm_arm_set_pmu(kvm, arm_pmu);
+        return 0;
+}
+
 static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 {
         struct kvm *kvm = vcpu->kvm;
@@ -911,7 +967,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
                 break;
         }
 
-        kvm->arch.arm_pmu = arm_pmu;
+        kvm_arm_set_pmu(kvm, arm_pmu);
         cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
         ret = 0;
         break;
@@ -934,23 +990,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
         if (vcpu->arch.pmu.created)
                 return -EBUSY;
 
-        if (!kvm->arch.arm_pmu) {
-                /*
-                 * No PMU set, get the default one.
-                 *
-                 * The observant among you will notice that the supported_cpus
-                 * mask does not get updated for the default PMU even though it
-                 * is quite possible the selected instance supports only a
-                 * subset of cores in the system. This is intentional, and
-                 * upholds the preexisting behavior on heterogeneous systems
-                 * where vCPUs can be scheduled on any core but the guest
-                 * counters could stop working.
-                 */
-                kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
-                if (!kvm->arch.arm_pmu)
-                        return -ENODEV;
-        }
-
         switch (attr->attr) {
         case KVM_ARM_VCPU_PMU_V3_IRQ: {
                 int __user *uaddr = (int __user *)(long)attr->addr;
@@ -1090,3 +1129,15 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
                                               ID_AA64DFR0_EL1_PMUVer_V3P5);
         return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
 }
+
+/**
+ * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
+ * @vcpu: The vcpu pointer
+ */
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+        u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
+                   ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+
+        return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}