@@ -111,6 +111,11 @@ static u32 counter_index_to_evtreg(u64 idx)
 	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
 }
 
+static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
+{
+	return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));
+}
+
 static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
 {
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
@@ -619,6 +624,24 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
 
+static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
+{
+	u64 evtreg = kvm_pmc_read_evtreg(pmc);
+	bool nsu = evtreg & ARMV8_PMU_EXCLUDE_NS_EL0;
+	bool u = evtreg & ARMV8_PMU_EXCLUDE_EL0;
+
+	return u == nsu;
+}
+
+static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
+{
+	u64 evtreg = kvm_pmc_read_evtreg(pmc);
+	bool nsk = evtreg & ARMV8_PMU_EXCLUDE_NS_EL1;
+	bool p = evtreg & ARMV8_PMU_EXCLUDE_EL1;
+
+	return p == nsk;
+}
+
 /**
  * kvm_pmu_create_perf_event - create a perf event for a counter
  * @pmc: Counter context
@@ -629,17 +652,15 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
 	struct perf_event *event;
 	struct perf_event_attr attr;
-	u64 eventsel, reg, data;
-	bool p, u, nsk, nsu;
+	u64 eventsel, evtreg;
 
-	reg = counter_index_to_evtreg(pmc->idx);
-	data = __vcpu_sys_reg(vcpu, reg);
+	evtreg = kvm_pmc_read_evtreg(pmc);
 
 	kvm_pmu_stop_counter(pmc);
 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
 		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
 	else
-		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
+		eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm);
 
 	/*
 	 * Neither SW increment nor chained events need to be backed
@@ -657,18 +678,13 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
 		return;
 
-	p = data & ARMV8_PMU_EXCLUDE_EL1;
-	u = data & ARMV8_PMU_EXCLUDE_EL0;
-	nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1;
-	nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0;
-
 	memset(&attr, 0, sizeof(struct perf_event_attr));
 	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
 	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
-	attr.exclude_user = (u != nsu);
-	attr.exclude_kernel = (p != nsk);
+	attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
+	attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);
 	attr.exclude_hv = 1; /* Don't count EL2 events */
 	attr.exclude_host = 1; /* Don't count host events */
 	attr.config = eventsel;
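
The new kvm_pmc_counts_at_el0()/kvm_pmc_counts_at_el1() helpers treat a counter as counting at a given exception level only when the Secure and Non-secure exclude bits for that level agree, and kvm_pmu_create_perf_event() inverts that result to fill in perf's exclude_user/exclude_kernel flags. Below is a minimal standalone sketch of that mapping, compiled outside of KVM; the EXCLUDE_* macros, helper names, and main() driver are illustrative stand-ins (assumed to mirror the architectural PMEVTYPER<n>_EL0 bit layout), not the kernel's own definitions.

/*
 * Standalone illustration of the exclude-bit mapping used above.
 * Bit positions are assumed to follow the PMEVTYPER<n>_EL0 layout:
 * P = bit 31, U = bit 30, NSK = bit 29, NSU = bit 28.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXCLUDE_EL1	(1U << 31)	/* P:   don't count at EL1 */
#define EXCLUDE_EL0	(1U << 30)	/* U:   don't count at EL0 */
#define EXCLUDE_NS_EL1	(1U << 29)	/* NSK: don't count at Non-secure EL1 */
#define EXCLUDE_NS_EL0	(1U << 28)	/* NSU: don't count at Non-secure EL0 */

/* Counts at EL0 only when the EL0 and NS-EL0 exclude bits agree. */
static bool counts_at_el0(uint64_t evtreg)
{
	bool nsu = evtreg & EXCLUDE_NS_EL0;
	bool u = evtreg & EXCLUDE_EL0;

	return u == nsu;
}

/* Counts at EL1 only when the EL1 and NS-EL1 exclude bits agree. */
static bool counts_at_el1(uint64_t evtreg)
{
	bool nsk = evtreg & EXCLUDE_NS_EL1;
	bool p = evtreg & EXCLUDE_EL1;

	return p == nsk;
}

int main(void)
{
	/* Example event type: exclude EL1 in both security states, count EL0. */
	uint64_t evtreg = EXCLUDE_EL1 | EXCLUDE_NS_EL1;

	printf("exclude_user   = %d\n", !counts_at_el0(evtreg));	/* prints 0 */
	printf("exclude_kernel = %d\n", !counts_at_el1(evtreg));	/* prints 1 */
	return 0;
}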