@@ -274,12 +274,23 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 	irq_work_sync(&vcpu->arch.pmu.overflow_work);
 }
 
-bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
 {
-	unsigned int hpmn;
+	unsigned int hpmn, n;
 
-	if (!vcpu_has_nv(vcpu) || idx == ARMV8_PMU_CYCLE_IDX)
-		return false;
+	if (!vcpu_has_nv(vcpu))
+		return 0;
+
+	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
+	n = vcpu->kvm->arch.pmcr_n;
+
+	/*
+	 * Programming HPMN to a value greater than PMCR_EL0.N is
+	 * CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
+	 * UNKNOWN number of counters (in our case, zero) are reserved for EL2.
+	 */
+	if (hpmn >= n)
+		return 0;
 
 	/*
 	 * Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
@@ -288,20 +299,22 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
 	 * implementation choice that all counters are included in the second
 	 * range reserved for EL2/EL3.
 	 */
-	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
-	return idx >= hpmn;
+	return GENMASK(n - 1, hpmn);
+}
+
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+	return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
 }
 
 u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
-	u64 hpmn;
 
 	if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
 		return mask;
 
-	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
-	return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
+	return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
 }
 
 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
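
For illustration, the partition computed by kvm_pmu_hyp_counter_mask() can be reproduced outside the kernel. The sketch below is not kernel code: genmask() and hyp_counter_mask() are stand-ins for the kernel's GENMASK() and the new helper, and the sample values (6 implemented counters, HPMN = 2) are hypothetical.

/*
 * Standalone sketch of the EL2-reserved counter mask derived from
 * MDCR_EL2.HPMN and PMCR_EL0.N. Names and sample values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's GENMASK(h, l) for 64-bit values. */
static uint64_t genmask(unsigned int h, unsigned int l)
{
	return (~0ULL >> (63 - h)) & (~0ULL << l);
}

/* Counters [hpmn, n) belong to EL2; [0, hpmn) remain with EL0/EL1. */
static uint64_t hyp_counter_mask(unsigned int hpmn, unsigned int n)
{
	if (hpmn >= n)	/* CONSTRAINED UNPREDICTABLE: reserve none for EL2 */
		return 0;
	return genmask(n - 1, hpmn);
}

int main(void)
{
	/* Example: 6 implemented counters, HPMN = 2. */
	printf("hyp mask = 0x%llx\n",
	       (unsigned long long)hyp_counter_mask(2, 6));
	return 0;
}

With those sample values the mask is 0x3c, i.e. counters 2..5 are reserved for EL2 while counters 0..1 stay in the range accessible to the guest's EL0/EL1.
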
@@ -375,14 +388,30 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	}
 }
 
-static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+/*
+ * Returns the PMU overflow state, which is true if there exists an event
+ * counter where the values of the global enable control, PMOVSSET_EL0[n], and
+ * PMINTENSET_EL1[n] are all 1.
+ */
+static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
-	u64 reg = 0;
+	u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 
-	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
-		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
-		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
-	}
+	reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+
+	/*
+	 * PMCR_EL0.E is the global enable control for event counters available
+	 * to EL0 and EL1.
+	 */
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
+		reg &= kvm_pmu_hyp_counter_mask(vcpu);
+
+	/*
+	 * Otherwise, MDCR_EL2.HPME is the global enable control for event
+	 * counters reserved for EL2.
+	 */
+	if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
+		reg &= ~kvm_pmu_hyp_counter_mask(vcpu);
 
 	return reg;
 }
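
A minimal sketch of the reworked overflow logic, again outside the kernel: each global enable clears only the overflow bits of its own partition, so PMCR_EL0.E gates the EL0/EL1 counters and MDCR_EL2.HPME gates the EL2-reserved ones. overflow_status() and its parameters are hypothetical stand-ins for the in-kernel register state.

/*
 * Standalone sketch of the split global-enable handling in
 * kvm_pmu_overflow_status(). All names and values are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool overflow_status(uint64_t pmovsset, uint64_t pmintenset,
			    uint64_t hyp_mask, bool pmcr_e, bool hpme)
{
	uint64_t reg = pmovsset & pmintenset;

	if (!pmcr_e)		/* EL0/EL1 counters globally disabled */
		reg &= hyp_mask;
	if (!hpme)		/* EL2-reserved counters globally disabled */
		reg &= ~hyp_mask;

	return reg;
}

int main(void)
{
	/*
	 * Counter 3 overflowed with its interrupt enabled; counters 2..5 are
	 * reserved for EL2 (hyp_mask = 0x3c), so only MDCR_EL2.HPME matters.
	 */
	printf("PMCR.E=0 HPME=1 -> %d\n",
	       overflow_status(1u << 3, 1u << 3, 0x3c, false, true));
	printf("PMCR.E=1 HPME=0 -> %d\n",
	       overflow_status(1u << 3, 1u << 3, 0x3c, true, false));
	return 0;
}

For a counter inside the EL2-reserved range, only MDCR_EL2.HPME decides whether its pending overflow contributes to the PMU interrupt; clearing PMCR_EL0.E no longer hides it, unlike the removed code above.
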
@@ -395,7 +424,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	overflow = !!kvm_pmu_overflow_status(vcpu);
+	overflow = kvm_pmu_overflow_status(vcpu);
 	if (pmu->irq_level == overflow)
 		return;
 