
Commit ffb6fc8

yamahata authored and bonzini committed
KVM: TDX: Make pmu_intel.c ignore guest TD case
TDX KVM doesn't support PMU yet; adding that support is future work, planned as a separate patch series. For now, handle TDX by updating vcpu_to_lbr_desc() and vcpu_to_lbr_records() to return NULL for a guest TD.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Co-developed-by: Tony Lindgren <tony.lindgren@linux.intel.com>
Signed-off-by: Tony Lindgren <tony.lindgren@linux.intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
---
- Add pragma poison for to_vmx() (Paolo)

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 0186dd2 commit ffb6fc8

File tree

3 files changed (+80, -34 lines)


arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 51 additions & 1 deletion
@@ -19,6 +19,7 @@
 #include "lapic.h"
 #include "nested.h"
 #include "pmu.h"
+#include "tdx.h"
 
 /*
  * Perf's "BASE" is wildly misleading, architectural PMUs use bits 31:16 of ECX
@@ -34,6 +35,24 @@
 
 #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
 
+static struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return NULL;
+
+	return &to_vmx(vcpu)->lbr_desc;
+}
+
+static struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return NULL;
+
+	return &to_vmx(vcpu)->lbr_desc.records;
+}
+
+#pragma GCC poison to_vmx
+
 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
 	struct kvm_pmc *pmc;
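The "#pragma GCC poison to_vmx" line is what enforces the two NULL-returning helpers above: once an identifier is poisoned, any later textual use of it in the translation unit is a hard compile error, so nothing below the accessors can call to_vmx() directly. A minimal standalone sketch (plain userspace C, not part of this patch) of how the pragma behaves:

/* Standalone sketch of "#pragma GCC poison" (supported by GCC and Clang):
 * after the pragma, any use of the named identifier fails to compile. */
#include <stdio.h>

static int helper(int x)
{
	return x * 2;	/* the identifier is still usable up to the poison point */
}

#pragma GCC poison helper

int main(void)
{
	/* helper(21); <- uncommenting this line breaks the build with
	 * "error: attempt to use poisoned \"helper\"" */
	printf("identifier 'helper' can no longer be referenced\n");
	return 0;
}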
@@ -129,6 +148,22 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
 }
 
+static bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return false;
+
+	return cpuid_model_is_consistent(vcpu);
+}
+
+bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu))
+		return false;
+
+	return !!vcpu_to_lbr_records(vcpu)->nr;
+}
+
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
 	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
@@ -194,6 +229,9 @@ static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
 {
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 
+	if (!lbr_desc)
+		return;
+
 	if (lbr_desc->event) {
 		perf_event_release_kernel(lbr_desc->event);
 		lbr_desc->event = NULL;
@@ -235,6 +273,9 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
 		PERF_SAMPLE_BRANCH_USER,
 	};
 
+	if (WARN_ON_ONCE(!lbr_desc))
+		return 0;
+
 	if (unlikely(lbr_desc->event)) {
 		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
 		return 0;
@@ -466,6 +507,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	u64 perf_capabilities;
 	u64 counter_rsvd;
 
+	if (!lbr_desc)
+		return;
+
 	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
 
 	/*
@@ -542,7 +586,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
 	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
-	if (cpuid_model_is_consistent(vcpu) &&
+	if (intel_pmu_lbr_is_compatible(vcpu) &&
 	    (perf_capabilities & PMU_CAP_LBR_FMT))
 		memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
 	else
@@ -570,6 +614,9 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 
+	if (!lbr_desc)
+		return;
+
 	for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
 		pmu->gp_counters[i].type = KVM_PMC_GP;
 		pmu->gp_counters[i].vcpu = vcpu;
@@ -677,6 +724,9 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 
+	if (WARN_ON_ONCE(!lbr_desc))
+		return;
+
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
 		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
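With these guards in place, the rule inside pmu_intel.c is that vcpu_to_lbr_desc() and vcpu_to_lbr_records() may return NULL for a TD vCPU, so every LBR path must bail out early. A hypothetical consumer in the style of the patched file (example_lbr_consumer is invented for illustration, not part of the patch):

/* Hypothetical consumer, not in the patch: shows the NULL-check
 * pattern this commit imposes on all LBR paths in pmu_intel.c. */
static void example_lbr_consumer(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc)		/* TD vCPU: no vcpu_vmx, no LBR support */
		return;

	/* ... safe to dereference lbr_desc below this point ... */
}

Note the distinction visible in the hunks above: paths that can legitimately run for a TD vCPU (init, refresh, release) use a plain NULL check, while paths that should be unreachable for a TD (creating the guest LBR event, passing through LBR MSRs) use WARN_ON_ONCE to flag a KVM bug.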

arch/x86/kvm/vmx/pmu_intel.h

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_PMU_INTEL_H
+#define __KVM_X86_VMX_PMU_INTEL_H
+
+#include <linux/kvm_host.h>
+
+bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
+int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
+
+struct lbr_desc {
+	/* Basic info about guest LBR records. */
+	struct x86_pmu_lbr records;
+
+	/*
+	 * Emulate LBR feature via passthrough LBR registers when the
+	 * per-vcpu guest LBR event is scheduled on the current pcpu.
+	 *
+	 * The records may be inaccurate if the host reclaims the LBR.
+	 */
+	struct perf_event *event;
+
+	/* True if LBRs are marked as not intercepted in the MSR bitmap */
+	bool msr_passthrough;
+};
+
+extern struct x86_pmu_lbr vmx_lbr_caps;
+
+#endif /* __KVM_X86_VMX_PMU_INTEL_H */
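Moving struct lbr_desc and vmx_lbr_caps out of vmx.h into this new header means LBR bookkeeping can be reached without having vcpu_vmx or to_vmx() in scope. An illustrative sketch (lbr_has_records is invented for this example, not part of the patch):

/* Illustrative only: a file that includes pmu_intel.h can inspect
 * LBR state without pulling in vmx.h (and thus without to_vmx()). */
#include "pmu_intel.h"

static bool lbr_has_records(struct lbr_desc *lbr_desc)
{
	return lbr_desc && lbr_desc->records.nr;
}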

arch/x86/kvm/vmx/vmx.h

Lines changed: 1 addition & 33 deletions
@@ -11,6 +11,7 @@
 
 #include "capabilities.h"
 #include "../kvm_cache_regs.h"
+#include "pmu_intel.h"
 #include "vmcs.h"
 #include "vmx_ops.h"
 #include "../cpuid.h"
@@ -90,24 +91,6 @@ union vmx_exit_reason
 	u32 full;
 };
 
-struct lbr_desc {
-	/* Basic info about guest LBR records. */
-	struct x86_pmu_lbr records;
-
-	/*
-	 * Emulate LBR feature via passthrough LBR registers when the
-	 * per-vcpu guest LBR event is scheduled on the current pcpu.
-	 *
-	 * The records may be inaccurate if the host reclaims the LBR.
-	 */
-	struct perf_event *event;
-
-	/* True if LBRs are marked as not intercepted in the MSR bitmap */
-	bool msr_passthrough;
-};
-
-extern struct x86_pmu_lbr vmx_lbr_caps;
-
 /*
  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -662,21 +645,6 @@ static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct vcpu_vmx, vcpu);
 }
 
-static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
-{
-	return &to_vmx(vcpu)->lbr_desc;
-}
-
-static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
-{
-	return &vcpu_to_lbr_desc(vcpu)->records;
-}
-
-static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
-{
-	return !!vcpu_to_lbr_records(vcpu)->nr;
-}
-
 void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
 int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
