@@ -27,6 +27,8 @@
 #define ARCH_PERFMON_BRANCHES_RETIRED 5
 
 #define NUM_BRANCHES 42
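+/* Fixed counter i is enabled by bit (32 + i) in MSR_CORE_PERF_GLOBAL_CTRL. */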
+#define INTEL_PMC_IDX_FIXED 32
 
 /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
 #define MAX_FILTER_EVENTS 300
@@ -808,6 +810,90 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
 	TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
 }
 
+static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
+{
+	for (;;) {
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
+
+		/* Only OS_EN bit is enabled for fixed counter[idx]. */
+		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
+		      BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
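+		/* Burn branches so the enabled fixed counter ticks. */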
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
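+		/* Report the counter value back to the host. */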
+		GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
+	}
+}
+
+static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
+					       uint32_t action, uint32_t bitmap)
+{
+	struct __kvm_pmu_event_filter f = {
+		.action = action,
+		.fixed_counter_bitmap = bitmap,
+	};
+	set_pmu_event_filter(vcpu, &f);
+
+	return run_vcpu_to_sync(vcpu);
+}
+
+static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
+					uint8_t nr_fixed_counters)
+{
+	unsigned int i;
+	uint32_t bitmap;
+	uint64_t count;
+
+	TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
+		    "Invalid nr_fixed_counters");
+
+	/*
+	 * Check the fixed performance counter can count normally when KVM
+	 * userspace doesn't set any pmu filter.
+	 */
+	count = run_vcpu_to_sync(vcpu);
+	TEST_ASSERT(count, "Unexpected count value: %ld", count);
+
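+	/*
+	 * With an ALLOW filter, the target counter should count iff its bit
+	 * is set in the bitmap; with a DENY filter, the expectation inverts.
+	 */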
+	for (i = 0; i < BIT(nr_fixed_counters); i++) {
+		bitmap = BIT(i);
+		count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
+						       bitmap);
+		TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
+
+		count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
+						       bitmap);
+		TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
+	}
+}
+
+static void test_fixed_counter_bitmap(void)
+{
+	uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+	struct kvm_vm *vm;
+	struct kvm_vcpu *vcpu;
+	uint8_t idx;
+
+	/*
+	 * Check that pmu_event_filter works as expected when it's applied to
+	 * fixed performance counters.
+	 */
+	for (idx = 0; idx < nr_fixed_counters; idx++) {
+		vm = vm_create_with_one_vcpu(&vcpu,
+					     intel_run_fixed_counter_guest_code);
+		vcpu_args_set(vcpu, 1, idx);
+		__test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
+		kvm_vm_free(vm);
+	}
+}
+
 int main(int argc, char *argv[])
 {
 	void (*guest_code)(void);
@@ -851,6 +937,7 @@ int main(int argc, char *argv[])
 	kvm_vm_free(vm);
 
 	test_pmu_config_disable(guest_code);
+	test_fixed_counter_bitmap();
 
 	return 0;
 }