
Commit 9b47f28

Merge tag 'kvm-x86-selftests_6.15-1' of https://github.com/kvm-x86/linux into HEAD
KVM selftests changes for 6.15, part 1

 - Misc cleanups and prep work.

 - Annotate _no_printf() with "printf" so that pr_debug() statements are checked by the compiler for default builds (and pr_info() when QUIET).

 - Attempt to whack the last LLC references/misses mole in the Intel PMU counters test by adding a data load and doing CLFLUSH{OPT} on the data instead of the code being executed (a sketch of this idea follows below). The theory is that modern Intel CPUs have learned new code prefetching tricks that bypass the PMU counters.

 - Fix a flaw in the Intel PMU counters test where it asserts that an event is counting correctly without actually knowing what the event counts on the underlying hardware.
2 parents 4d9a677 + 54108e7 commit 9b47f28
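To illustrate the CLFLUSH{OPT}-on-data idea from the second bullet of the commit message, here is a minimal, hypothetical sketch (not the actual pmu_counters_test code; the buffer and helper names are invented): the guest touches a cache-line-aligned data buffer, flushes it, and reloads it, so the load is guaranteed to generate an LLC reference (and very likely a miss) regardless of how aggressively the CPU prefetches code.

/* Hypothetical sketch only: force an LLC reference/miss on a data access. */
static unsigned char scratch_line[64] __attribute__((aligned(64)));

static void force_llc_data_miss(void)
{
	/* Evict the line (CLFLUSHOPT assumed supported; CLFLUSH works too). */
	asm volatile("clflushopt %0" : "+m" (scratch_line[0]));

	/* Order the flush before the reload. */
	asm volatile("mfence" ::: "memory");

	/* This data load is what the LLC references/misses events should count. */
	(void)*(volatile unsigned char *)scratch_line;
}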

10 files changed (+151, -83 lines)


tools/testing/selftests/kvm/access_tracking_perf_test.c

Lines changed: 1 addition & 1 deletion
@@ -239,7 +239,7 @@ static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
 		case ITERATION_MARK_IDLE:
 			mark_vcpu_memory_idle(vm, vcpu_args);
 			break;
-		};
+		}
 
 		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
 	}

tools/testing/selftests/kvm/include/test_util.h

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@
 
 #define msecs_to_usecs(msec) ((msec) * 1000ULL)
 
-static inline int _no_printf(const char *format, ...) { return 0; }
+static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; }
 
 #ifdef DEBUG
 #define pr_debug(...) printf(__VA_ARGS__)
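For context on the annotation above: __printf(a, b) is conventionally a thin wrapper around the compiler's printf-format attribute (the exact definition the selftests pick up may live in a shared header). With it, pr_debug()/pr_info() arguments are type-checked even in builds where they compile down to the empty _no_printf() stub. A minimal sketch, assuming the conventional definition:

/* Assumed, conventional definition; the selftests may inherit it from elsewhere. */
#define __printf(a, b) __attribute__((format(printf, a, b)))

static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; }

/*
 * Even when pr_debug() maps to _no_printf() and emits nothing, a mismatch
 * like the following now triggers a -Wformat warning at compile time:
 *
 *	pr_debug("iteration %d\n", "not an int");
 */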

tools/testing/selftests/kvm/include/x86/processor.h

Lines changed: 47 additions & 0 deletions
@@ -183,6 +183,9 @@ struct kvm_x86_cpu_feature {
  * Extended Leafs, a.k.a. AMD defined
  */
 #define X86_FEATURE_SVM			KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
+#define X86_FEATURE_PERFCTR_CORE	KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
+#define X86_FEATURE_PERFCTR_NB		KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 24)
+#define X86_FEATURE_PERFCTR_LLC		KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 28)
 #define X86_FEATURE_NX			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
 #define X86_FEATURE_GBPAGES		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
 #define X86_FEATURE_RDTSCP		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
@@ -199,6 +202,8 @@ struct kvm_x86_cpu_feature {
 #define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
 #define X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
 #define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
+#define X86_FEATURE_PERFMON_V2		KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)
+#define X86_FEATURE_LBR_PMC_FREEZE	KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 2)
 
 /*
  * KVM defined paravirt features.
@@ -285,6 +290,8 @@ struct kvm_x86_cpu_property {
 #define X86_PROPERTY_GUEST_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
 #define X86_PROPERTY_SEV_C_BIT			KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
 #define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
+#define X86_PROPERTY_NR_PERFCTR_CORE		KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 0, 3)
+#define X86_PROPERTY_NR_PERFCTR_NB		KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 10, 15)
 
 #define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
 
@@ -1339,6 +1346,46 @@ static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
 	GUEST_ASSERT(!ret);
 }
 
+/*
+ * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
+ * intended to be a wake event arrives *after* HLT is executed. Modern CPUs,
+ * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
+ * instruction after STI, *if* RFLAGS.IF=0 before STI. Note, Intel CPUs may
+ * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
+ */
+static inline void safe_halt(void)
+{
+	asm volatile("sti; hlt");
+}
+
+/*
+ * Enable interrupts and ensure that interrupts are evaluated upon return from
+ * this function, i.e. execute a nop to consume the STI interrupt shadow.
+ */
+static inline void sti_nop(void)
+{
+	asm volatile ("sti; nop");
+}
+
+/*
+ * Enable interrupts for one instruction (nop), to allow the CPU to process all
+ * interrupts that are already pending.
+ */
+static inline void sti_nop_cli(void)
+{
+	asm volatile ("sti; nop; cli");
+}
+
+static inline void sti(void)
+{
+	asm volatile("sti");
+}
+
+static inline void cli(void)
+{
+	asm volatile ("cli");
+}
+
 void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
 
 #define vm_xsave_require_permission(xfeature) \
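A hedged usage sketch of the new AMD PMU feature and property definitions above, using the selftests' existing kvm_cpu_has()/kvm_cpu_property() and TEST_REQUIRE()/TEST_ASSERT() helpers (the function below is illustrative, not part of the series):

/* Illustrative only: gate a test on AMD PerfMonV2 and read the number of
 * core counters enumerated by CPUID 0x80000022.EBX via the new property.
 */
static void require_amd_pmu_v2(void)
{
	uint32_t nr_core_counters;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PERFMON_V2));

	nr_core_counters = kvm_cpu_property(X86_PROPERTY_NR_PERFCTR_CORE);
	TEST_ASSERT(nr_core_counters > 0,
		    "PerfMonV2 advertised, but zero core counters enumerated");
}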

tools/testing/selftests/kvm/x86/hyperv_ipi.c

Lines changed: 4 additions & 2 deletions
@@ -63,8 +63,10 @@ static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
 	/* Signal sender vCPU we're ready */
 	ipis_rcvd[vcpu_id] = (u64)-1;
 
-	for (;;)
-		asm volatile("sti; hlt; cli");
+	for (;;) {
+		safe_halt();
+		cli();
+	}
 }
 
 static void guest_ipi_handler(struct ex_regs *regs)
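The conversion above keeps STI and HLT back-to-back via safe_halt(), so the wake IPI cannot be delivered between enabling interrupts and halting, and then masks IRQs again with cli() before the next iteration. The companion sti_nop_cli() helper covers the complementary pattern of draining already-pending interrupts; a hedged sketch (the counter variable is hypothetical):

/* Hypothetical guest-side snippet: briefly open an interrupt window so any
 * already-pending IPIs are delivered, then read the handler-updated counter
 * with interrupts masked again.
 */
static uint64_t drain_pending_ipis_and_read(volatile uint64_t *nr_ipis_rcvd)
{
	sti_nop_cli();	/* sti; nop; cli: a one-instruction IRQ window */

	return *nr_ipis_rcvd;
}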
