Skip to content

Commit d4e3616

Browse files
Jinrong Liang authored and sean-jc committed
KVM: selftests: Test if event filter meets expectations on fixed counters
Add tests to cover that pmu event_filter works as expected when it's applied to fixed performance counters, even if no fixed counter exists (e.g. Intel guest pmu version=1 or AMD guest).

Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Link: https://lore.kernel.org/r/20230810090945.16053-6-cloudliang@tencent.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 86ab6af commit d4e3616

File tree

1 file changed

+80
-0
lines changed

1 file changed

+80
-0
lines changed

tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c

Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
#define ARCH_PERFMON_BRANCHES_RETIRED 5
2828

2929
#define NUM_BRANCHES 42
30+
#define INTEL_PMC_IDX_FIXED 32
3031

3132
/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
3233
#define MAX_FILTER_EVENTS 300
@@ -808,6 +809,84 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
808809
TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
809810
}
810811

812+
/*
 * Guest code that programs and reads back a single Intel fixed performance
 * counter, identified by @fixed_ctr_idx.  Loops forever, reporting the
 * counter value to the host via GUEST_SYNC() on every iteration.
 */
static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
{
	for (;;) {
		/* Disable all counters and zero the target fixed counter. */
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);

		/* Only OS_EN bit is enabled for fixed counter[idx]. */
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
		      BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
		/* Execute a fixed number of branches for the counter to observe. */
		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
		/* Stop counting before reading the result. */
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

		GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
	}
}
828+
829+
static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
830+
uint32_t action, uint32_t bitmap)
831+
{
832+
struct __kvm_pmu_event_filter f = {
833+
.action = action,
834+
.fixed_counter_bitmap = bitmap,
835+
};
836+
set_pmu_event_filter(vcpu, &f);
837+
838+
return run_vcpu_to_sync(vcpu);
839+
}
840+
841+
static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
842+
uint8_t nr_fixed_counters)
843+
{
844+
unsigned int i;
845+
uint32_t bitmap;
846+
uint64_t count;
847+
848+
TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
849+
"Invalid nr_fixed_counters");
850+
851+
/*
852+
* Check the fixed performance counter can count normally when KVM
853+
* userspace doesn't set any pmu filter.
854+
*/
855+
count = run_vcpu_to_sync(vcpu);
856+
TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
857+
858+
for (i = 0; i < BIT(nr_fixed_counters); i++) {
859+
bitmap = BIT(i);
860+
count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
861+
bitmap);
862+
TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
863+
864+
count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
865+
bitmap);
866+
TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
867+
}
868+
}
869+
870+
static void test_fixed_counter_bitmap(void)
871+
{
872+
uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
873+
struct kvm_vm *vm;
874+
struct kvm_vcpu *vcpu;
875+
uint8_t idx;
876+
877+
/*
878+
* Check that pmu_event_filter works as expected when it's applied to
879+
* fixed performance counters.
880+
*/
881+
for (idx = 0; idx < nr_fixed_counters; idx++) {
882+
vm = vm_create_with_one_vcpu(&vcpu,
883+
intel_run_fixed_counter_guest_code);
884+
vcpu_args_set(vcpu, 1, idx);
885+
__test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
886+
kvm_vm_free(vm);
887+
}
888+
}
889+
811890
int main(int argc, char *argv[])
812891
{
813892
void (*guest_code)(void);
@@ -851,6 +930,7 @@ int main(int argc, char *argv[])
851930
kvm_vm_free(vm);
852931

853932
test_pmu_config_disable(guest_code);
933+
test_fixed_counter_bitmap();
854934

855935
return 0;
856936
}

0 commit comments

Comments
 (0)