@@ -19,6 +19,7 @@
 #include "lapic.h"
 #include "nested.h"
 #include "pmu.h"
+#include "tdx.h"

 /*
  * Perf's "BASE" is wildly misleading, architectural PMUs use bits 31:16 of ECX
@@ -34,6 +35,24 @@

 #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

+static struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return NULL;
+
+	return &to_vmx(vcpu)->lbr_desc;
+}
+
+static struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return NULL;
+
+	return &to_vmx(vcpu)->lbr_desc.records;
+}
+
+#pragma GCC poison to_vmx
+
 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
 	struct kvm_pmc *pmc;
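The two accessors just added are the crux of the patch: a TDX vCPU has no backing struct vcpu_vmx, so they return NULL instead of calling to_vmx(), and the #pragma GCC poison line turns any later use of to_vmx() in this file into a compile-time error, funneling all code below it through the NULL-checked helpers. A minimal, runnable user-space sketch of that poisoning pattern (all names below are illustrative, not KVM's):

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct ctx { int value; };

/* Raw accessor: only valid in the "unrestricted" case. */
static struct ctx *raw_ctx(struct ctx *c)
{
	return c;
}

/* Checked accessor: the analogue of vcpu_to_lbr_desc() above. */
static struct ctx *checked_ctx(bool restricted, struct ctx *c)
{
	if (restricted)		/* plays the role of is_td_vcpu() */
		return NULL;

	return raw_ctx(c);
}

/* From this point on, any mention of raw_ctx is a compile-time error,
 * exactly like the "#pragma GCC poison to_vmx" in the patch. */
#pragma GCC poison raw_ctx

int main(void)
{
	struct ctx c = { .value = 42 };
	struct ctx *p = checked_ctx(false, &c);

	printf("%d\n", p ? p->value : -1);	/* prints 42 */
	return 0;
}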
|
@@ -129,6 +148,22 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
 }

+static bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return false;
+
+	return cpuid_model_is_consistent(vcpu);
+}
+
+bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return false;
+
+	return !!vcpu_to_lbr_records(vcpu)->nr;
+}
+
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
 	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
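Note the ordering inside intel_pmu_lbr_is_enabled(): the explicit is_td_vcpu() check is not redundant, because vcpu_to_lbr_records() returns NULL for TDX vCPUs and the subsequent ->nr dereference would otherwise be a NULL-pointer access. A runnable sketch of that constraint, with stand-in names rather than KVM's:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct lbr_records_ish { int nr; };

static struct lbr_records_ish *records_of(bool is_td, struct lbr_records_ish *r)
{
	return is_td ? NULL : r;	/* mirrors vcpu_to_lbr_records() */
}

static bool lbr_is_enabled(bool is_td, struct lbr_records_ish *r)
{
	if (is_td)	/* must precede the dereference below */
		return false;

	return records_of(is_td, r)->nr != 0;
}

int main(void)
{
	struct lbr_records_ish r = { .nr = 8 };

	printf("vmx: %d, td: %d\n",
	       lbr_is_enabled(false, &r),	/* 1 */
	       lbr_is_enabled(true, &r));	/* 0, and no NULL deref */
	return 0;
}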
|
@@ -194,6 +229,9 @@ static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
 {
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

+	if (!lbr_desc)
+		return;
+
 	if (lbr_desc->event) {
 		perf_event_release_kernel(lbr_desc->event);
 		lbr_desc->event = NULL;
|
@@ -235,6 +273,9 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
 		PERF_SAMPLE_BRANCH_USER,
 	};

+	if (WARN_ON_ONCE(!lbr_desc))
+		return 0;
+
 	if (unlikely(lbr_desc->event)) {
 		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
 		return 0;
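Unlike the quiet early return in intel_pmu_release_guest_lbr_event(), this path uses WARN_ON_ONCE(): creating a guest LBR event should be unreachable for a TDX vCPU, so a NULL lbr_desc here indicates a KVM bug worth reporting (once) while still failing safely. For readers unfamiliar with the macro, here is a rough user-space approximation of its warn-once semantics; the real kernel macro also dumps a backtrace, which this sketch omits:

#include <stdio.h>
#include <stdbool.h>

/* Crude analogue of the kernel's WARN_ON_ONCE(): evaluates to the
 * condition, and prints a warning the first time it is true.
 * Uses a GNU C statement expression, as kernel code does. */
#define WARN_ON_ONCE_ISH(cond) ({				\
	static bool __warned;					\
	bool __c = !!(cond);					\
	if (__c && !__warned) {					\
		__warned = true;				\
		fprintf(stderr, "WARNING: %s:%d: %s\n",		\
			__FILE__, __LINE__, #cond);		\
	}							\
	__c;							\
})

static int create_event(void *desc)
{
	if (WARN_ON_ONCE_ISH(!desc))	/* mirrors the guard in the hunk */
		return 0;		/* fail safely, as the patch does */

	/* ... normal event creation would go here ... */
	return 1;
}

int main(void)
{
	int dummy;

	create_event(NULL);	/* warns once */
	create_event(NULL);	/* silent */
	create_event(&dummy);	/* proceeds */
	return 0;
}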
|
@@ -466,6 +507,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	u64 perf_capabilities;
 	u64 counter_rsvd;

+	if (!lbr_desc)
+		return;
+
 	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));

 	/*
|
@@ -542,7 +586,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

 	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
-	if (cpuid_model_is_consistent(vcpu) &&
+	if (intel_pmu_lbr_is_compatible(vcpu) &&
 	    (perf_capabilities & PMU_CAP_LBR_FMT))
 		memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
 	else
|
@@ -570,6 +614,9 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

+	if (!lbr_desc)
+		return;
+
 	for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
 		pmu->gp_counters[i].type = KVM_PMC_GP;
 		pmu->gp_counters[i].vcpu = vcpu;
|
@@ -677,6 +724,9 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

+	if (WARN_ON_ONCE(!lbr_desc))
+		return;
+
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
 		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
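Taken together, the new guards fall into two deliberate tiers: paths that a TDX vCPU can legitimately reach (release, refresh, init) bail out quietly, while paths that should never see a TDX vCPU (guest LBR event creation and this MSR passthrough hook) warn before bailing. The poisoned to_vmx() then guarantees, at compile time, that no future change to this file can bypass the NULL-checked accessors.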