Commit 88e4cd8

Merge tag 'kvm-x86-pmu-6.6-fixes' of https://github.com/kvm-x86/linux into HEAD
KVM x86/pmu fixes for 6.6:

- Truncate writes to PMU counters to the counter's width to avoid spurious
  overflows when emulating counter events in software.

- Set the LVTPC entry mask bit when handling a PMI (to match Intel-defined
  architectural behavior).

- Treat KVM_REQ_PMI as a wake event instead of queueing host IRQ work to
  kick the guest out of emulated halt.

2 parents: 24422df + 73554b2 · commit 88e4cd8
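To make the first fix in the summary concrete: if a guest write to a counter MSR is stored without truncating the value to the counter's width, the stray high bits make the next software-emulated event look like a wrap, and KVM would inject an overflow PMI that real hardware never raises. The sketch below is a minimal standalone illustration of that failure mode, not KVM code: the 48-bit width, the demo_* names and the wrap-based overflow check are stand-ins, loosely modeled on how KVM emulates counter events. The pmc_write_counter() helper added in arch/x86/kvm/pmu.h (see the hunk further down) applies exactly this truncation at write time.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_WIDTH      48
#define DEMO_MASK       ((1ull << DEMO_WIDTH) - 1)

/* Emulate one counter event the way a software PMU model might: detect
 * an overflow by watching the masked value wrap below its previous value. */
static bool demo_incr_overflows(uint64_t *counter)
{
        uint64_t prev = *counter;

        *counter = (*counter + 1) & DEMO_MASK;
        return *counter < prev;
}

int main(void)
{
        /* A 64-bit guest write with a bit set above the 48-bit counter width. */
        uint64_t raw = (1ull << DEMO_WIDTH) | 0x5;

        /* Stored untruncated: the very next emulated event "wraps" and looks
         * like an overflow, even though the guest-visible count was only 5. */
        uint64_t untruncated = raw;
        printf("untruncated write -> spurious overflow: %s\n",
               demo_incr_overflows(&untruncated) ? "yes" : "no");

        /* Stored truncated to the counter width: no overflow is reported. */
        uint64_t truncated = raw & DEMO_MASK;
        printf("truncated write   -> spurious overflow: %s\n",
               demo_incr_overflows(&truncated) ? "yes" : "no");
        return 0;
}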

7 files changed (+19, -32 lines)

arch/x86/include/asm/kvm_host.h

Lines changed: 0 additions & 1 deletion
@@ -528,7 +528,6 @@ struct kvm_pmu {
         u64 raw_event_mask;
         struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
         struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
-        struct irq_work irq_work;
 
         /*
          * Overlay the bitmap with a 64-bit atomic so that all bits can be

arch/x86/kvm/lapic.c

Lines changed: 6 additions & 2 deletions
@@ -2759,13 +2759,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 {
         u32 reg = kvm_lapic_get_reg(apic, lvt_type);
         int vector, mode, trig_mode;
+        int r;
 
         if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
                 vector = reg & APIC_VECTOR_MASK;
                 mode = reg & APIC_MODE_MASK;
                 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
-                return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
-                                         NULL);
+
+                r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
+                if (r && lvt_type == APIC_LVTPC)
+                        kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
+                return r;
         }
         return 0;
 }
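For context on the hunk above: Intel documents that when the local APIC delivers a PMI through the LVT performance-counter entry, the entry's mask bit is set, and the guest's PMI handler is expected to unmask it once the interrupt has been serviced; the change makes KVM's emulated APIC follow that rule. The snippet below is a standalone model of the behavior, not KVM or guest code: it treats the LVTPC entry as a plain 32-bit value, uses invented lvtpc_* helper names, and ignores details such as NMI delivery mode.

#include <stdint.h>
#include <stdio.h>

/* Field layout mirrors the LVT register format: vector in bits 7:0,
 * mask in bit 16 (APIC_LVT_MASKED in the kernel). */
#define LVT_VECTOR_MASK 0xffu
#define LVT_MASKED      (1u << 16)

/* Deliver a PMI through the entry; after a successful delivery the
 * entry stays masked until software unmasks it again. */
static uint32_t lvtpc_deliver_pmi(uint32_t lvtpc)
{
        if (lvtpc & LVT_MASKED) {
                printf("PMI suppressed, LVTPC is masked\n");
                return lvtpc;
        }
        printf("PMI delivered on vector %#x\n",
               (unsigned int)(lvtpc & LVT_VECTOR_MASK));
        return lvtpc | LVT_MASKED;
}

/* What a guest PMI handler typically does once it has handled the PMI. */
static uint32_t lvtpc_unmask(uint32_t lvtpc)
{
        return lvtpc & ~LVT_MASKED;
}

int main(void)
{
        uint32_t lvtpc = 0xf0;                  /* unmasked, vector 0xf0 */

        lvtpc = lvtpc_deliver_pmi(lvtpc);       /* delivered, entry masked */
        lvtpc = lvtpc_deliver_pmi(lvtpc);       /* suppressed while masked */
        lvtpc = lvtpc_unmask(lvtpc);            /* handler re-enables PMIs */
        lvtpc = lvtpc_deliver_pmi(lvtpc);       /* delivered again */
        return 0;
}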

arch/x86/kvm/pmu.c

Lines changed: 1 addition & 26 deletions
@@ -93,14 +93,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
 #undef __KVM_X86_PMU_OP
 }
 
-static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
-{
-        struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
-        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
-        kvm_pmu_deliver_pmi(vcpu);
-}
-
 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
 {
         struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -124,20 +116,7 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
                 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
         }
 
-        if (!pmc->intr || skip_pmi)
-                return;
-
-        /*
-         * Inject PMI. If vcpu was in a guest mode during NMI PMI
-         * can be ejected on a guest mode re-entry. Otherwise we can't
-         * be sure that vcpu wasn't executing hlt instruction at the
-         * time of vmexit and is not going to re-enter guest mode until
-         * woken up. So we should wake it, but this is impossible from
-         * NMI context. Do it from irq work instead.
-         */
-        if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
-                irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
-        else
+        if (pmc->intr && !skip_pmi)
                 kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
 }
 
@@ -675,9 +654,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 {
-        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-
-        irq_work_sync(&pmu->irq_work);
         static_call(kvm_x86_pmu_reset)(vcpu);
 }
 
@@ -687,7 +663,6 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 
         memset(pmu, 0, sizeof(*pmu));
         static_call(kvm_x86_pmu_init)(vcpu);
-        init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
         pmu->event_count = 0;
         pmu->need_cleanup = false;
         kvm_pmu_refresh(vcpu);

arch/x86/kvm/pmu.h

Lines changed: 6 additions & 0 deletions
@@ -74,6 +74,12 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
         return counter & pmc_bitmask(pmc);
 }
 
+static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
+{
+        pmc->counter += val - pmc_read_counter(pmc);
+        pmc->counter &= pmc_bitmask(pmc);
+}
+
 static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
 {
         if (pmc->perf_event) {

arch/x86/kvm/svm/pmu.c

Lines changed: 1 addition & 1 deletion
@@ -160,7 +160,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         /* MSR_PERFCTRn */
         pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
         if (pmc) {
-                pmc->counter += data - pmc_read_counter(pmc);
+                pmc_write_counter(pmc, data);
                 pmc_update_sample_period(pmc);
                 return 0;
         }

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 2 additions & 2 deletions
@@ -436,11 +436,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                         if (!msr_info->host_initiated &&
                             !(msr & MSR_PMC_FULL_WIDTH_BIT))
                                 data = (s64)(s32)data;
-                        pmc->counter += data - pmc_read_counter(pmc);
+                        pmc_write_counter(pmc, data);
                         pmc_update_sample_period(pmc);
                         break;
                 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
-                        pmc->counter += data - pmc_read_counter(pmc);
+                        pmc_write_counter(pmc, data);
                         pmc_update_sample_period(pmc);
                         break;
                 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {

arch/x86/kvm/x86.c

Lines changed: 3 additions & 0 deletions
@@ -12854,6 +12854,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
                 return true;
 #endif
 
+        if (kvm_test_request(KVM_REQ_PMI, vcpu))
+                return true;
+
         if (kvm_arch_interrupt_allowed(vcpu) &&
             (kvm_cpu_has_interrupt(vcpu) ||
              kvm_guest_apic_has_interrupt(vcpu)))
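This last hunk is what allows the irq_work plumbing removed from arch/x86/kvm/pmu.c to go away: a pending KVM_REQ_PMI now counts as a wake event, so a vCPU blocked in emulated halt notices the request and the PMI is injected on the next entry. Below is a rough standalone model of that idea, not KVM's blocking or halt-polling code; the demo_* names, the request bit and the sleep-based polling loop are invented purely for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DEMO_REQ_PMI    (1u << 0)       /* invented stand-in for KVM_REQ_PMI */

struct demo_vcpu {
        atomic_uint requests;           /* pending-request bitmap */
};

/* The gist of the hunk above: a pending PMI request is itself an event
 * that should bring a halted vCPU out of its blocked state. */
static bool demo_vcpu_has_events(struct demo_vcpu *vcpu)
{
        return atomic_load(&vcpu->requests) & DEMO_REQ_PMI;
}

/* Crude stand-in for emulated HLT: poll until an event shows up. */
static void *demo_vcpu_halt(void *arg)
{
        struct demo_vcpu *vcpu = arg;

        while (!demo_vcpu_has_events(vcpu))
                usleep(1000);
        printf("vCPU woken, PMI will be injected on the next entry\n");
        return NULL;
}

int main(void)
{
        struct demo_vcpu vcpu;
        pthread_t thread;

        atomic_init(&vcpu.requests, 0);
        pthread_create(&thread, NULL, demo_vcpu_halt, &vcpu);
        usleep(10000);

        /* Counter-overflow path: raise the request; no host irq_work is
         * needed just to wake the halted vCPU. */
        atomic_fetch_or(&vcpu.requests, DEMO_REQ_PMI);

        pthread_join(thread, NULL);
        return 0;
}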
