Skip to content

Commit 4d4c028

Browse files
committed
Merge tag 'kvm-x86-selftests-6.9' of https://github.com/kvm-x86/linux into HEAD
KVM selftests changes for 6.9:

 - Add macros to reduce the amount of boilerplate code needed to write
   "simple" selftests, and to utilize selftest TAP infrastructure, which
   is especially beneficial for KVM selftests with multiple testcases.

 - Add basic smoke tests for SEV and SEV-ES, along with a pile of library
   support for handling private/encrypted/protected memory.

 - Fix benign bugs where tests neglect to close() guest_memfd files.
2 parents f074158 + e9da6f0 commit 4d4c028

26 files changed

+802
-240
lines changed

tools/testing/selftests/kvm/Makefile

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ LIBKVM_x86_64 += lib/x86_64/handlers.S
3737
LIBKVM_x86_64 += lib/x86_64/hyperv.c
3838
LIBKVM_x86_64 += lib/x86_64/memstress.c
3939
LIBKVM_x86_64 += lib/x86_64/processor.c
40+
LIBKVM_x86_64 += lib/x86_64/sev.c
4041
LIBKVM_x86_64 += lib/x86_64/svm.c
4142
LIBKVM_x86_64 += lib/x86_64/ucall.c
4243
LIBKVM_x86_64 += lib/x86_64/vmx.c
@@ -118,6 +119,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_caps_test
118119
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
119120
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
120121
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
122+
TEST_GEN_PROGS_x86_64 += x86_64/sev_smoke_test
121123
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
122124
TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
123125
TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test

tools/testing/selftests/kvm/guest_memfd_test.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,9 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
167167
TEST_ASSERT(ret != -1, "memfd fstat should succeed");
168168
TEST_ASSERT(st1.st_size == 4096, "first memfd st_size should still match requested size");
169169
TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers");
170+
171+
close(fd2);
172+
close(fd1);
170173
}
171174

172175
int main(int argc, char *argv[])
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
#ifndef SELFTEST_KVM_UTIL_ARCH_H
3+
#define SELFTEST_KVM_UTIL_ARCH_H
4+
5+
struct kvm_vm_arch {};
6+
7+
#endif // SELFTEST_KVM_UTIL_ARCH_H
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
/*
3+
* Macros for defining a KVM test
4+
*
5+
* Copyright (C) 2022, Google LLC.
6+
*/
7+
8+
#ifndef SELFTEST_KVM_TEST_HARNESS_H
9+
#define SELFTEST_KVM_TEST_HARNESS_H
10+
11+
#include "kselftest_harness.h"
12+
13+
#define KVM_ONE_VCPU_TEST_SUITE(name) \
14+
FIXTURE(name) { \
15+
struct kvm_vcpu *vcpu; \
16+
}; \
17+
\
18+
FIXTURE_SETUP(name) { \
19+
(void)vm_create_with_one_vcpu(&self->vcpu, NULL); \
20+
} \
21+
\
22+
FIXTURE_TEARDOWN(name) { \
23+
kvm_vm_free(self->vcpu->vm); \
24+
}
25+
26+
#define KVM_ONE_VCPU_TEST(suite, test, guestcode) \
27+
static void __suite##_##test(struct kvm_vcpu *vcpu); \
28+
\
29+
TEST_F(suite, test) \
30+
{ \
31+
vcpu_arch_set_entry_point(self->vcpu, guestcode); \
32+
__suite##_##test(self->vcpu); \
33+
} \
34+
static void __suite##_##test(struct kvm_vcpu *vcpu)
35+
36+
#endif /* SELFTEST_KVM_TEST_HARNESS_H */

tools/testing/selftests/kvm/include/kvm_util_base.h

Lines changed: 53 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,11 @@
1818
#include <linux/types.h>
1919

2020
#include <asm/atomic.h>
21+
#include <asm/kvm.h>
2122

2223
#include <sys/ioctl.h>
2324

25+
#include "kvm_util_arch.h"
2426
#include "sparsebit.h"
2527

2628
/*
@@ -46,6 +48,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
4648
struct userspace_mem_region {
4749
struct kvm_userspace_memory_region2 region;
4850
struct sparsebit *unused_phy_pages;
51+
struct sparsebit *protected_phy_pages;
4952
int fd;
5053
off_t offset;
5154
enum vm_mem_backing_src_type backing_src_type;
@@ -90,6 +93,7 @@ enum kvm_mem_region_type {
9093
struct kvm_vm {
9194
int mode;
9295
unsigned long type;
96+
uint8_t subtype;
9397
int kvm_fd;
9498
int fd;
9599
unsigned int pgtable_levels;
@@ -111,6 +115,9 @@ struct kvm_vm {
111115
vm_vaddr_t idt;
112116
vm_vaddr_t handlers;
113117
uint32_t dirty_ring_size;
118+
uint64_t gpa_tag_mask;
119+
120+
struct kvm_vm_arch arch;
114121

115122
/* Cache of information for binary stats interface */
116123
int stats_fd;
@@ -191,10 +198,14 @@ enum vm_guest_mode {
191198
};
192199

193200
struct vm_shape {
194-
enum vm_guest_mode mode;
195-
unsigned int type;
201+
uint32_t type;
202+
uint8_t mode;
203+
uint8_t subtype;
204+
uint16_t padding;
196205
};
197206

207+
kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
208+
198209
#define VM_TYPE_DEFAULT 0
199210

200211
#define VM_SHAPE(__mode) \
@@ -564,6 +575,13 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
564575
uint64_t guest_paddr, uint32_t slot, uint64_t npages,
565576
uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
566577

578+
#ifndef vm_arch_has_protected_memory
579+
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
580+
{
581+
return false;
582+
}
583+
#endif
584+
567585
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
568586
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
569587
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
@@ -573,6 +591,9 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_mi
573591
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
574592
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
575593
enum kvm_mem_region_type type);
594+
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
595+
vm_vaddr_t vaddr_min,
596+
enum kvm_mem_region_type type);
576597
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
577598
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
578599
enum kvm_mem_region_type type);
@@ -585,6 +606,12 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
585606
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
586607
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
587608

609+
610+
static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
611+
{
612+
return gpa & ~vm->gpa_tag_mask;
613+
}
614+
588615
void vcpu_run(struct kvm_vcpu *vcpu);
589616
int _vcpu_run(struct kvm_vcpu *vcpu);
590617

@@ -827,10 +854,23 @@ const char *exit_reason_str(unsigned int exit_reason);
827854

828855
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
829856
uint32_t memslot);
830-
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
831-
vm_paddr_t paddr_min, uint32_t memslot);
857+
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
858+
vm_paddr_t paddr_min, uint32_t memslot,
859+
bool protected);
832860
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
833861

862+
static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
863+
vm_paddr_t paddr_min, uint32_t memslot)
864+
{
865+
/*
866+
* By default, allocate memory as protected for VMs that support
867+
* protected memory, as the majority of memory for such VMs is
868+
* protected, i.e. using shared memory is effectively opt-in.
869+
*/
870+
return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
871+
vm_arch_has_protected_memory(vm));
872+
}
873+
834874
/*
835875
* ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
836876
* loads the test binary into guest memory and creates an IRQ chip (x86 only).
@@ -969,15 +1009,18 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
9691009
* Input Args:
9701010
* vm - Virtual Machine
9711011
* vcpu_id - The id of the VCPU to add to the VM.
972-
* guest_code - The vCPU's entry point
9731012
*/
974-
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
975-
void *guest_code);
1013+
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
1014+
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
9761015

9771016
static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
9781017
void *guest_code)
9791018
{
980-
return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
1019+
struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
1020+
1021+
vcpu_arch_set_entry_point(vcpu, guest_code);
1022+
1023+
return vcpu;
9811024
}
9821025

9831026
/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
@@ -1081,6 +1124,8 @@ void kvm_selftest_arch_init(void);
10811124

10821125
void kvm_arch_vm_post_create(struct kvm_vm *vm);
10831126

1127+
bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
1128+
10841129
uint32_t guest_get_vcpuid(void);
10851130

10861131
#endif /* SELFTEST_KVM_UTIL_BASE_H */
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
#ifndef SELFTEST_KVM_UTIL_ARCH_H
3+
#define SELFTEST_KVM_UTIL_ARCH_H
4+
5+
struct kvm_vm_arch {};
6+
7+
#endif // SELFTEST_KVM_UTIL_ARCH_H
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
#ifndef SELFTEST_KVM_UTIL_ARCH_H
3+
#define SELFTEST_KVM_UTIL_ARCH_H
4+
5+
struct kvm_vm_arch {};
6+
7+
#endif // SELFTEST_KVM_UTIL_ARCH_H

tools/testing/selftests/kvm/include/sparsebit.h

Lines changed: 38 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -30,26 +30,26 @@ typedef uint64_t sparsebit_num_t;
3030

3131
struct sparsebit *sparsebit_alloc(void);
3232
void sparsebit_free(struct sparsebit **sbitp);
33-
void sparsebit_copy(struct sparsebit *dstp, struct sparsebit *src);
33+
void sparsebit_copy(struct sparsebit *dstp, const struct sparsebit *src);
3434

35-
bool sparsebit_is_set(struct sparsebit *sbit, sparsebit_idx_t idx);
36-
bool sparsebit_is_set_num(struct sparsebit *sbit,
35+
bool sparsebit_is_set(const struct sparsebit *sbit, sparsebit_idx_t idx);
36+
bool sparsebit_is_set_num(const struct sparsebit *sbit,
3737
sparsebit_idx_t idx, sparsebit_num_t num);
38-
bool sparsebit_is_clear(struct sparsebit *sbit, sparsebit_idx_t idx);
39-
bool sparsebit_is_clear_num(struct sparsebit *sbit,
38+
bool sparsebit_is_clear(const struct sparsebit *sbit, sparsebit_idx_t idx);
39+
bool sparsebit_is_clear_num(const struct sparsebit *sbit,
4040
sparsebit_idx_t idx, sparsebit_num_t num);
41-
sparsebit_num_t sparsebit_num_set(struct sparsebit *sbit);
42-
bool sparsebit_any_set(struct sparsebit *sbit);
43-
bool sparsebit_any_clear(struct sparsebit *sbit);
44-
bool sparsebit_all_set(struct sparsebit *sbit);
45-
bool sparsebit_all_clear(struct sparsebit *sbit);
46-
sparsebit_idx_t sparsebit_first_set(struct sparsebit *sbit);
47-
sparsebit_idx_t sparsebit_first_clear(struct sparsebit *sbit);
48-
sparsebit_idx_t sparsebit_next_set(struct sparsebit *sbit, sparsebit_idx_t prev);
49-
sparsebit_idx_t sparsebit_next_clear(struct sparsebit *sbit, sparsebit_idx_t prev);
50-
sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *sbit,
41+
sparsebit_num_t sparsebit_num_set(const struct sparsebit *sbit);
42+
bool sparsebit_any_set(const struct sparsebit *sbit);
43+
bool sparsebit_any_clear(const struct sparsebit *sbit);
44+
bool sparsebit_all_set(const struct sparsebit *sbit);
45+
bool sparsebit_all_clear(const struct sparsebit *sbit);
46+
sparsebit_idx_t sparsebit_first_set(const struct sparsebit *sbit);
47+
sparsebit_idx_t sparsebit_first_clear(const struct sparsebit *sbit);
48+
sparsebit_idx_t sparsebit_next_set(const struct sparsebit *sbit, sparsebit_idx_t prev);
49+
sparsebit_idx_t sparsebit_next_clear(const struct sparsebit *sbit, sparsebit_idx_t prev);
50+
sparsebit_idx_t sparsebit_next_set_num(const struct sparsebit *sbit,
5151
sparsebit_idx_t start, sparsebit_num_t num);
52-
sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *sbit,
52+
sparsebit_idx_t sparsebit_next_clear_num(const struct sparsebit *sbit,
5353
sparsebit_idx_t start, sparsebit_num_t num);
5454

5555
void sparsebit_set(struct sparsebit *sbitp, sparsebit_idx_t idx);
@@ -62,9 +62,29 @@ void sparsebit_clear_num(struct sparsebit *sbitp,
6262
sparsebit_idx_t start, sparsebit_num_t num);
6363
void sparsebit_clear_all(struct sparsebit *sbitp);
6464

65-
void sparsebit_dump(FILE *stream, struct sparsebit *sbit,
65+
void sparsebit_dump(FILE *stream, const struct sparsebit *sbit,
6666
unsigned int indent);
67-
void sparsebit_validate_internal(struct sparsebit *sbit);
67+
void sparsebit_validate_internal(const struct sparsebit *sbit);
68+
69+
/*
70+
* Iterate over the inclusive ranges within sparsebit @s. In each iteration,
71+
* @range_begin and @range_end will take the beginning and end of the set
72+
* range, which are of type sparsebit_idx_t.
73+
*
74+
* For example, if the range [3, 7] (inclusive) is set, within the
75+
* iteration, @range_begin will take the value 3 and @range_end will take
76+
* the value 7.
77+
*
78+
* Before using this macro, ensure that there is at least one bit set by
79+
* checking with sparsebit_any_set(), because sparsebit_first_set() will
80+
* abort if none are set.
81+
*/
82+
#define sparsebit_for_each_set_range(s, range_begin, range_end) \
83+
for (range_begin = sparsebit_first_set(s), \
84+
range_end = sparsebit_next_clear(s, range_begin) - 1; \
85+
range_begin && range_end; \
86+
range_begin = sparsebit_next_set(s, range_end), \
87+
range_end = sparsebit_next_clear(s, range_begin) - 1)
6888

6989
#ifdef __cplusplus
7090
}
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
/* SPDX-License-Identifier: GPL-2.0-only */
2+
#ifndef SELFTEST_KVM_UTIL_ARCH_H
3+
#define SELFTEST_KVM_UTIL_ARCH_H
4+
5+
#include <stdbool.h>
6+
#include <stdint.h>
7+
8+
struct kvm_vm_arch {
9+
uint64_t c_bit;
10+
uint64_t s_bit;
11+
int sev_fd;
12+
bool is_pt_protected;
13+
};
14+
15+
static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch)
16+
{
17+
return arch->c_bit || arch->s_bit;
18+
}
19+
20+
#define vm_arch_has_protected_memory(vm) \
21+
__vm_arch_has_protected_memory(&(vm)->arch)
22+
23+
#endif // SELFTEST_KVM_UTIL_ARCH_H

tools/testing/selftests/kvm/include/x86_64/processor.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,12 @@
2323
extern bool host_cpu_is_intel;
2424
extern bool host_cpu_is_amd;
2525

26+
enum vm_guest_x86_subtype {
27+
VM_SUBTYPE_NONE = 0,
28+
VM_SUBTYPE_SEV,
29+
VM_SUBTYPE_SEV_ES,
30+
};
31+
2632
#define NMI_VECTOR 0x02
2733

2834
#define X86_EFLAGS_FIXED (1u << 1)
@@ -273,6 +279,7 @@ struct kvm_x86_cpu_property {
273279
#define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
274280
#define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
275281
#define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
282+
#define X86_PROPERTY_SEV_C_BIT KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
276283
#define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
277284

278285
#define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
@@ -1059,6 +1066,7 @@ do { \
10591066
} while (0)
10601067

10611068
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
1069+
void kvm_init_vm_address_properties(struct kvm_vm *vm);
10621070
bool vm_is_unrestricted_guest(struct kvm_vm *vm);
10631071

10641072
struct ex_regs {

0 commit comments

Comments
 (0)