Skip to content

Commit edd1e59

Browse files
committed
Merge tag 'kvm-x86-selftests-6.13' of https://github.com/kvm-x86/linux into HEAD
KVM selftests changes for 6.13 - Enable XFAM-based features by default for all selftests VMs, which will allow removing the "no AVX" restriction.
2 parents c59de14 + 89f8869 commit edd1e59

File tree

10 files changed

+126
-90
lines changed

10 files changed

+126
-90
lines changed

tools/testing/selftests/kvm/hardware_disable_test.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
#define SLEEPING_THREAD_NUM (1 << 4)
2121
#define FORK_NUM (1ULL << 9)
2222
#define DELAY_US_MAX 2000
23-
#define GUEST_CODE_PIO_PORT 4
2423

2524
sem_t *sem;
2625

tools/testing/selftests/kvm/include/x86_64/processor.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1049,6 +1049,11 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
10491049
vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
10501050
}
10511051

1052+
static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
1053+
{
1054+
vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
1055+
}
1056+
10521057
void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
10531058
struct kvm_x86_cpu_property property,
10541059
uint32_t value);

tools/testing/selftests/kvm/lib/x86_64/processor.c

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -506,6 +506,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
506506

507507
sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
508508
sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
509+
if (kvm_cpu_has(X86_FEATURE_XSAVE))
510+
sregs.cr4 |= X86_CR4_OSXSAVE;
509511
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
510512

511513
kvm_seg_set_unusable(&sregs.ldt);
@@ -519,6 +521,20 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
519521
vcpu_sregs_set(vcpu, &sregs);
520522
}
521523

524+
static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
525+
{
526+
struct kvm_xcrs xcrs = {
527+
.nr_xcrs = 1,
528+
.xcrs[0].xcr = 0,
529+
.xcrs[0].value = kvm_cpu_supported_xcr0(),
530+
};
531+
532+
if (!kvm_cpu_has(X86_FEATURE_XSAVE))
533+
return;
534+
535+
vcpu_xcrs_set(vcpu, &xcrs);
536+
}
537+
522538
static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
523539
int dpl, unsigned short selector)
524540
{
@@ -675,6 +691,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
675691
vcpu = __vm_vcpu_add(vm, vcpu_id);
676692
vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
677693
vcpu_init_sregs(vm, vcpu);
694+
vcpu_init_xcrs(vm, vcpu);
678695

679696
/* Setup guest general purpose registers */
680697
vcpu_regs_get(vcpu, &regs);
@@ -686,6 +703,13 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
686703
mp_state.mp_state = 0;
687704
vcpu_mp_state_set(vcpu, &mp_state);
688705

706+
/*
707+
* Refresh CPUID after setting SREGS and XCR0, so that KVM's "runtime"
708+
* updates to guest CPUID, e.g. for OSXSAVE and XSAVE state size, are
709+
* reflected into selftests' vCPU CPUID cache, i.e. so that the cache
710+
* is consistent with vCPU state.
711+
*/
712+
vcpu_get_cpuid(vcpu);
689713
return vcpu;
690714
}
691715

tools/testing/selftests/kvm/x86_64/amx_test.c

Lines changed: 4 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,8 @@ static inline void __xsavec(struct xstate *xstate, uint64_t rfbm)
8686

8787
static void check_xtile_info(void)
8888
{
89+
GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE);
90+
8991
GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0));
9092
GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0) <= XSAVE_SIZE);
9193

@@ -122,29 +124,12 @@ static void set_tilecfg(struct tile_config *cfg)
122124
}
123125
}
124126

125-
static void init_regs(void)
126-
{
127-
uint64_t cr4, xcr0;
128-
129-
GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));
130-
131-
/* turn on CR4.OSXSAVE */
132-
cr4 = get_cr4();
133-
cr4 |= X86_CR4_OSXSAVE;
134-
set_cr4(cr4);
135-
GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
136-
137-
xcr0 = xgetbv(0);
138-
xcr0 |= XFEATURE_MASK_XTILE;
139-
xsetbv(0x0, xcr0);
140-
GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE);
141-
}
142-
143127
static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
144128
struct tile_data *tiledata,
145129
struct xstate *xstate)
146130
{
147-
init_regs();
131+
GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE) &&
132+
this_cpu_has(X86_FEATURE_OSXSAVE));
148133
check_xtile_info();
149134
GUEST_SYNC(1);
150135

tools/testing/selftests/kvm/x86_64/cpuid_test.c

Lines changed: 43 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -12,17 +12,16 @@
1212
#include "kvm_util.h"
1313
#include "processor.h"
1414

15-
/* CPUIDs known to differ */
16-
struct {
17-
u32 function;
18-
u32 index;
19-
} mangled_cpuids[] = {
20-
/*
21-
* These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
22-
* which are not controlled for by this test.
23-
*/
24-
{.function = 0xd, .index = 0},
25-
{.function = 0xd, .index = 1},
15+
struct cpuid_mask {
16+
union {
17+
struct {
18+
u32 eax;
19+
u32 ebx;
20+
u32 ecx;
21+
u32 edx;
22+
};
23+
u32 regs[4];
24+
};
2625
};
2726

2827
static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
@@ -56,17 +55,29 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid)
5655
GUEST_DONE();
5756
}
5857

59-
static bool is_cpuid_mangled(const struct kvm_cpuid_entry2 *entrie)
58+
static struct cpuid_mask get_const_cpuid_mask(const struct kvm_cpuid_entry2 *entry)
6059
{
61-
int i;
62-
63-
for (i = 0; i < ARRAY_SIZE(mangled_cpuids); i++) {
64-
if (mangled_cpuids[i].function == entrie->function &&
65-
mangled_cpuids[i].index == entrie->index)
66-
return true;
60+
struct cpuid_mask mask;
61+
62+
memset(&mask, 0xff, sizeof(mask));
63+
64+
switch (entry->function) {
65+
case 0x1:
66+
mask.regs[X86_FEATURE_OSXSAVE.reg] &= ~BIT(X86_FEATURE_OSXSAVE.bit);
67+
break;
68+
case 0x7:
69+
mask.regs[X86_FEATURE_OSPKE.reg] &= ~BIT(X86_FEATURE_OSPKE.bit);
70+
break;
71+
case 0xd:
72+
/*
73+
* CPUID.0xD.{0,1}.EBX enumerate XSAVE size based on the current
74+
* XCR0 and IA32_XSS MSR values.
75+
*/
76+
if (entry->index < 2)
77+
mask.ebx = 0;
78+
break;
6779
}
68-
69-
return false;
80+
return mask;
7081
}
7182

7283
static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
@@ -79,6 +90,8 @@ static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
7990
"CPUID nent mismatch: %d vs. %d", cpuid1->nent, cpuid2->nent);
8091

8192
for (i = 0; i < cpuid1->nent; i++) {
93+
struct cpuid_mask mask;
94+
8295
e1 = &cpuid1->entries[i];
8396
e2 = &cpuid2->entries[i];
8497

@@ -88,15 +101,19 @@ static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
88101
i, e1->function, e1->index, e1->flags,
89102
e2->function, e2->index, e2->flags);
90103

91-
if (is_cpuid_mangled(e1))
92-
continue;
104+
/* Mask off dynamic bits, e.g. OSXSAVE, when comparing entries. */
105+
mask = get_const_cpuid_mask(e1);
93106

94-
TEST_ASSERT(e1->eax == e2->eax && e1->ebx == e2->ebx &&
95-
e1->ecx == e2->ecx && e1->edx == e2->edx,
107+
TEST_ASSERT((e1->eax & mask.eax) == (e2->eax & mask.eax) &&
108+
(e1->ebx & mask.ebx) == (e2->ebx & mask.ebx) &&
109+
(e1->ecx & mask.ecx) == (e2->ecx & mask.ecx) &&
110+
(e1->edx & mask.edx) == (e2->edx & mask.edx),
96111
"CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
97112
e1->function, e1->index,
98-
e1->eax, e1->ebx, e1->ecx, e1->edx,
99-
e2->eax, e2->ebx, e2->ecx, e2->edx);
113+
e1->eax & mask.eax, e1->ebx & mask.ebx,
114+
e1->ecx & mask.ecx, e1->edx & mask.edx,
115+
e2->eax & mask.eax, e2->ebx & mask.ebx,
116+
e2->ecx & mask.ecx, e2->edx & mask.edx);
100117
}
101118
}
102119

tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c

Lines changed: 36 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -19,30 +19,42 @@
1919
#include "kvm_util.h"
2020
#include "processor.h"
2121

22-
static inline bool cr4_cpuid_is_sync(void)
23-
{
24-
uint64_t cr4 = get_cr4();
25-
26-
return (this_cpu_has(X86_FEATURE_OSXSAVE) == !!(cr4 & X86_CR4_OSXSAVE));
27-
}
22+
#define MAGIC_HYPERCALL_PORT 0x80
2823

2924
static void guest_code(void)
3025
{
31-
uint64_t cr4;
26+
u32 regs[4] = {
27+
[KVM_CPUID_EAX] = X86_FEATURE_OSXSAVE.function,
28+
[KVM_CPUID_ECX] = X86_FEATURE_OSXSAVE.index,
29+
};
3230

33-
/* turn on CR4.OSXSAVE */
34-
cr4 = get_cr4();
35-
cr4 |= X86_CR4_OSXSAVE;
36-
set_cr4(cr4);
31+
/* CR4.OSXSAVE should be enabled by default (for selftests vCPUs). */
32+
GUEST_ASSERT(get_cr4() & X86_CR4_OSXSAVE);
3733

3834
/* verify CR4.OSXSAVE == CPUID.OSXSAVE */
39-
GUEST_ASSERT(cr4_cpuid_is_sync());
40-
41-
/* notify hypervisor to change CR4 */
42-
GUEST_SYNC(0);
43-
44-
/* check again */
45-
GUEST_ASSERT(cr4_cpuid_is_sync());
35+
GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
36+
37+
/*
38+
* Notify hypervisor to clear CR4.OSXSAVE, do CPUID and save output,
39+
* and then restore CR4. Do this all in assembly to ensure no AVX
40+
* instructions are executed while OSXSAVE=0.
41+
*/
42+
asm volatile (
43+
"out %%al, $" __stringify(MAGIC_HYPERCALL_PORT) "\n\t"
44+
"cpuid\n\t"
45+
"mov %%rdi, %%cr4\n\t"
46+
: "+a" (regs[KVM_CPUID_EAX]),
47+
"=b" (regs[KVM_CPUID_EBX]),
48+
"+c" (regs[KVM_CPUID_ECX]),
49+
"=d" (regs[KVM_CPUID_EDX])
50+
: "D" (get_cr4())
51+
);
52+
53+
/* Verify KVM cleared OSXSAVE in CPUID when it was cleared in CR4. */
54+
GUEST_ASSERT(!(regs[X86_FEATURE_OSXSAVE.reg] & BIT(X86_FEATURE_OSXSAVE.bit)));
55+
56+
/* Verify restoring CR4 also restored OSXSAVE in CPUID. */
57+
GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
4658

4759
GUEST_DONE();
4860
}
@@ -62,13 +74,16 @@ int main(int argc, char *argv[])
6274
vcpu_run(vcpu);
6375
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
6476

65-
switch (get_ucall(vcpu, &uc)) {
66-
case UCALL_SYNC:
77+
if (vcpu->run->io.port == MAGIC_HYPERCALL_PORT &&
78+
vcpu->run->io.direction == KVM_EXIT_IO_OUT) {
6779
/* emulate hypervisor clearing CR4.OSXSAVE */
6880
vcpu_sregs_get(vcpu, &sregs);
6981
sregs.cr4 &= ~X86_CR4_OSXSAVE;
7082
vcpu_sregs_set(vcpu, &sregs);
71-
break;
83+
continue;
84+
}
85+
86+
switch (get_ucall(vcpu, &uc)) {
7287
case UCALL_ABORT:
7388
REPORT_GUEST_ASSERT(uc);
7489
break;

tools/testing/selftests/kvm/x86_64/debug_regs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ int main(void)
166166
/* Test single step */
167167
target_rip = CAST_TO_RIP(ss_start);
168168
target_dr6 = 0xffff4ff0ULL;
169-
for (i = 0; i < (sizeof(ss_size) / sizeof(ss_size[0])); i++) {
169+
for (i = 0; i < ARRAY_SIZE(ss_size); i++) {
170170
target_rip += ss_size[i];
171171
memset(&debug, 0, sizeof(debug));
172172
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |

tools/testing/selftests/kvm/x86_64/sev_smoke_test.c

Lines changed: 5 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -41,8 +41,8 @@ static void guest_sev_code(void)
4141
/* Stash state passed via VMSA before any compiled code runs. */
4242
extern void guest_code_xsave(void);
4343
asm("guest_code_xsave:\n"
44-
"mov $-1, %eax\n"
45-
"mov $-1, %edx\n"
44+
"mov $" __stringify(XFEATURE_MASK_X87_AVX) ", %eax\n"
45+
"xor %edx, %edx\n"
4646
"xsave (%rdi)\n"
4747
"jmp guest_sev_es_code");
4848

@@ -70,12 +70,6 @@ static void test_sync_vmsa(uint32_t policy)
7070

7171
double x87val = M_PI;
7272
struct kvm_xsave __attribute__((aligned(64))) xsave = { 0 };
73-
struct kvm_sregs sregs;
74-
struct kvm_xcrs xcrs = {
75-
.nr_xcrs = 1,
76-
.xcrs[0].xcr = 0,
77-
.xcrs[0].value = XFEATURE_MASK_X87_AVX,
78-
};
7973

8074
vm = vm_sev_create_with_one_vcpu(KVM_X86_SEV_ES_VM, guest_code_xsave, &vcpu);
8175
gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR,
@@ -84,11 +78,6 @@ static void test_sync_vmsa(uint32_t policy)
8478

8579
vcpu_args_set(vcpu, 1, gva);
8680

87-
vcpu_sregs_get(vcpu, &sregs);
88-
sregs.cr4 |= X86_CR4_OSFXSR | X86_CR4_OSXSAVE;
89-
vcpu_sregs_set(vcpu, &sregs);
90-
91-
vcpu_xcrs_set(vcpu, &xcrs);
9281
asm("fninit\n"
9382
"vpcmpeqb %%ymm4, %%ymm4, %%ymm4\n"
9483
"fldl %3\n"
@@ -192,6 +181,8 @@ static void test_sev_es_shutdown(void)
192181

193182
int main(int argc, char *argv[])
194183
{
184+
const u64 xf_mask = XFEATURE_MASK_X87_AVX;
185+
195186
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
196187

197188
test_sev(guest_sev_code, SEV_POLICY_NO_DBG);
@@ -204,7 +195,7 @@ int main(int argc, char *argv[])
204195
test_sev_es_shutdown();
205196

206197
if (kvm_has_cap(KVM_CAP_XCRS) &&
207-
(xgetbv(0) & XFEATURE_MASK_X87_AVX) == XFEATURE_MASK_X87_AVX) {
198+
(xgetbv(0) & kvm_cpu_supported_xcr0() & xf_mask) == xf_mask) {
208199
test_sync_vmsa(0);
209200
test_sync_vmsa(SEV_POLICY_NO_DBG);
210201
}

tools/testing/selftests/kvm/x86_64/state_test.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -145,11 +145,6 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
145145

146146
memset(buffer, 0xcc, sizeof(buffer));
147147

148-
set_cr4(get_cr4() | X86_CR4_OSXSAVE);
149-
GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
150-
151-
xsetbv(0, xgetbv(0) | supported_xcr0);
152-
153148
/*
154149
* Modify state for all supported xfeatures to take them out of
155150
* their "init" state, i.e. to make them show up in XSTATE_BV.

0 commit comments

Comments
 (0)