Commit 783e9cd

Merge tag 'kvm-x86-selftests-6.15' of https://github.com/kvm-x86/linux into HEAD

KVM selftests changes for 6.15, part 2:

- Fix a variety of flaws, bugs, and false failures/passes in dirty_log_test, and improve its coverage by collecting all dirty entries on each iteration.

- Fix a few minor bugs related to handling of stats FDs.

- Add infrastructure to make vCPU and VM stats FDs available to tests by default (open the FDs during VM/vCPU creation).

- Relax an assertion on the number of HLT exits in the xAPIC IPI test when running on a CPU that supports AMD's Idle HLT (which elides interception of HLT if a virtual IRQ is pending and unmasked).

- Misc cleanups and fixes.

2 parents: 9b47f28 + 62838fa

File tree: 9 files changed, +370 −356 lines

tools/testing/selftests/kvm/dirty_log_test.c

Lines changed: 247 additions & 276 deletions
Large diffs are not rendered by default.

tools/testing/selftests/kvm/include/kvm_util.h

Lines changed: 21 additions & 12 deletions
@@ -46,6 +46,12 @@ struct userspace_mem_region {
 	struct hlist_node slot_node;
 };
 
+struct kvm_binary_stats {
+	int fd;
+	struct kvm_stats_header header;
+	struct kvm_stats_desc *desc;
+};
+
 struct kvm_vcpu {
 	struct list_head list;
 	uint32_t id;
@@ -55,6 +61,7 @@ struct kvm_vcpu {
 #ifdef __x86_64__
 	struct kvm_cpuid2 *cpuid;
 #endif
+	struct kvm_binary_stats stats;
 	struct kvm_dirty_gfn *dirty_gfns;
 	uint32_t fetch_index;
 	uint32_t dirty_gfns_count;
@@ -99,10 +106,7 @@ struct kvm_vm {
 
 	struct kvm_vm_arch arch;
 
-	/* Cache of information for binary stats interface */
-	int stats_fd;
-	struct kvm_stats_header stats_header;
-	struct kvm_stats_desc *stats_desc;
+	struct kvm_binary_stats stats;
 
 	/*
 	 * KVM region slots. These are the default memslots used by page
@@ -531,16 +535,19 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
 		    struct kvm_stats_desc *desc, uint64_t *data,
 		    size_t max_elements);
 
-void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
-		   size_t max_elements);
+void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
+		  uint64_t *data, size_t max_elements);
 
-static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
-{
-	uint64_t data;
+#define __get_stat(stats, stat)				\
+({							\
+	uint64_t data;					\
+							\
+	kvm_get_stat(stats, #stat, &data, 1);		\
+	data;						\
+})
 
-	__vm_get_stat(vm, stat_name, &data, 1);
-	return data;
-}
+#define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
+#define vcpu_get_stat(vcpu, stat) __get_stat(&(vcpu)->stats, stat)
 
 void vm_create_irqchip(struct kvm_vm *vm);
 
@@ -963,6 +970,8 @@ static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape
 
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 
+void kvm_set_files_rlimit(uint32_t nr_vcpus);
+
 void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
 void kvm_print_vcpu_pinning_help(void);
 void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
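
The new accessors take the stat name as a bare token rather than a string: __get_stat() stringifies it via #stat before passing it to kvm_get_stat(), which now fails the test outright if the name matches no descriptor instead of silently leaving the buffer untouched. A minimal usage sketch, assuming the standard selftests entry points (the guest code and stat choices here are illustrative, not from this commit):

	#include "kvm_util.h"

	static void guest_code(void)
	{
	}

	int main(void)
	{
		struct kvm_vcpu *vcpu;
		struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);

		/*
		 * Stat names are tokens, not strings; a misspelled name now
		 * trips TEST_FAIL() at runtime instead of reading back
		 * uninitialized data.
		 */
		uint64_t pages = vm_get_stat(vm, pages_4k);
		uint64_t exits = vcpu_get_stat(vcpu, halt_exits);

		pr_info("pages_4k = %lu, halt_exits = %lu\n", pages, exits);
		kvm_vm_free(vm);
		return 0;
	}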

tools/testing/selftests/kvm/include/x86/processor.h

Lines changed: 2 additions & 1 deletion
@@ -200,6 +200,7 @@ struct kvm_x86_cpu_feature {
 #define X86_FEATURE_PAUSEFILTER		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
 #define X86_FEATURE_PFTHRESHOLD		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
 #define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
+#define X86_FEATURE_IDLE_HLT		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
 #define X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
 #define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
 #define X86_FEATURE_PERFMON_V2		KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)
@@ -1251,7 +1252,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 	uint64_t ign_error_code;				\
 	uint8_t vector;						\
								\
-	asm volatile(KVM_ASM_SAFE(insn)				\
+	asm volatile(KVM_ASM_SAFE_FEP(insn)			\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs					\
		     : KVM_ASM_SAFE_CLOBBERS);			\
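
The second hunk fixes an apparent copy-paste bug: the forced-emulation (FEP) variant of the safe-instruction macro was emitting the plain KVM_ASM_SAFE() sequence, so the instruction was never actually routed through KVM's instruction emulator. A hedged sketch of how the *_fep helpers are used guest-side (the MSR and register constraints are illustrative, not from this commit):

	/*
	 * Execute WRMSR through KVM's emulator (forced emulation prefix);
	 * the *_safe helpers return the vector of any exception raised
	 * instead of killing the guest.
	 */
	uint64_t val = 0;
	uint8_t vector = kvm_asm_safe_fep("wrmsr",
					  "c"(MSR_IA32_MISC_ENABLE),
					  "a"((uint32_t)val), "d"(val >> 32));
	GUEST_ASSERT(!vector);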

tools/testing/selftests/kvm/kvm_create_max_vcpus.c

Lines changed: 1 addition & 27 deletions
@@ -10,7 +10,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <sys/resource.h>
 
 #include "test_util.h"
 
@@ -39,36 +38,11 @@ int main(int argc, char *argv[])
 {
 	int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
 	int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
-	/*
-	 * Number of file descriptors reqired, KVM_CAP_MAX_VCPUS for vCPU fds +
-	 * an arbitrary number for everything else.
-	 */
-	int nr_fds_wanted = kvm_max_vcpus + 100;
-	struct rlimit rl;
 
 	pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
 	pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
 
-	/*
-	 * Check that we're allowed to open nr_fds_wanted file descriptors and
-	 * try raising the limits if needed.
-	 */
-	TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
-
-	if (rl.rlim_cur < nr_fds_wanted) {
-		rl.rlim_cur = nr_fds_wanted;
-		if (rl.rlim_max < nr_fds_wanted) {
-			int old_rlim_max = rl.rlim_max;
-			rl.rlim_max = nr_fds_wanted;
-
-			int r = setrlimit(RLIMIT_NOFILE, &rl);
-			__TEST_REQUIRE(r >= 0,
-				       "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)",
-				       old_rlim_max, nr_fds_wanted);
-		} else {
-			TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
-		}
-	}
+	kvm_set_files_rlimit(kvm_max_vcpus);
 
 	/*
	 * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
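
With the logic hoisted into the library (see the kvm_util.c diff below), __vm_create() bumps the limit automatically; this test keeps an explicit call because it creates KVM_CAP_MAX_VCPUS worth of vCPU FDs itself. For readers outside the selftests tree, the helper boils down to the standard getrlimit()/setrlimit() dance; a freestanding sketch under that assumption:

	#include <stdlib.h>
	#include <sys/resource.h>

	/*
	 * Ensure RLIMIT_NOFILE admits `wanted` descriptors, raising the
	 * soft (and, given sufficient privilege, the hard) limit as needed.
	 */
	static void ensure_nofile(rlim_t wanted)
	{
		struct rlimit rl;

		if (getrlimit(RLIMIT_NOFILE, &rl))
			exit(1);

		if (rl.rlim_cur >= wanted)
			return;

		rl.rlim_cur = wanted;
		if (rl.rlim_max < wanted)
			rl.rlim_max = wanted;	/* needs CAP_SYS_RESOURCE */

		if (setrlimit(RLIMIT_NOFILE, &rl))
			exit(1);
	}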

tools/testing/selftests/kvm/lib/kvm_util.c

Lines changed: 80 additions & 34 deletions
@@ -12,6 +12,7 @@
 #include <assert.h>
 #include <sched.h>
 #include <sys/mman.h>
+#include <sys/resource.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
@@ -196,6 +197,11 @@ static void vm_open(struct kvm_vm *vm)
 
 	vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
 	TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
+
+	if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
+		vm->stats.fd = vm_get_stats_fd(vm);
+	else
+		vm->stats.fd = -1;
 }
 
 const char *vm_guest_mode_string(uint32_t i)
@@ -406,6 +412,38 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
 	return vm_adjust_num_guest_pages(mode, nr_pages);
 }
 
+void kvm_set_files_rlimit(uint32_t nr_vcpus)
+{
+	/*
+	 * Each vCPU will open two file descriptors: the vCPU itself and the
+	 * vCPU's binary stats file descriptor.  Add an arbitrary amount of
+	 * buffer for all other files a test may open.
+	 */
+	int nr_fds_wanted = nr_vcpus * 2 + 100;
+	struct rlimit rl;
+
+	/*
+	 * Check that we're allowed to open nr_fds_wanted file descriptors and
+	 * try raising the limits if needed.
+	 */
+	TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
+
+	if (rl.rlim_cur < nr_fds_wanted) {
+		rl.rlim_cur = nr_fds_wanted;
+		if (rl.rlim_max < nr_fds_wanted) {
+			int old_rlim_max = rl.rlim_max;
+
+			rl.rlim_max = nr_fds_wanted;
+			__TEST_REQUIRE(setrlimit(RLIMIT_NOFILE, &rl) >= 0,
+				       "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)",
+				       old_rlim_max, nr_fds_wanted);
+		} else {
+			TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
+		}
+	}
+
+}
+
 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 			   uint64_t nr_extra_pages)
 {
@@ -415,6 +453,8 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 	struct kvm_vm *vm;
 	int i;
 
+	kvm_set_files_rlimit(nr_runnable_vcpus);
+
 	pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
 		 vm_guest_mode_string(shape.mode), shape.type, nr_pages);
 
@@ -657,6 +697,23 @@ userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
 	return NULL;
 }
 
+static void kvm_stats_release(struct kvm_binary_stats *stats)
+{
+	int ret;
+
+	if (stats->fd < 0)
+		return;
+
+	if (stats->desc) {
+		free(stats->desc);
+		stats->desc = NULL;
+	}
+
+	ret = close(stats->fd);
+	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+	stats->fd = -1;
+}
+
 __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
 {
 
@@ -690,6 +747,8 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 	ret = close(vcpu->fd);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
 
+	kvm_stats_release(&vcpu->stats);
+
 	list_del(&vcpu->list);
 
 	vcpu_arch_free(vcpu);
@@ -709,6 +768,9 @@ void kvm_vm_release(struct kvm_vm *vmp)
 
 	ret = close(vmp->kvm_fd);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+
+	/* Free cached stats metadata and close FD */
+	kvm_stats_release(&vmp->stats);
 }
 
 static void __vm_mem_region_delete(struct kvm_vm *vm,
@@ -748,12 +810,6 @@ void kvm_vm_free(struct kvm_vm *vmp)
 	if (vmp == NULL)
 		return;
 
-	/* Free cached stats metadata and close FD */
-	if (vmp->stats_fd) {
-		free(vmp->stats_desc);
-		close(vmp->stats_fd);
-	}
-
 	/* Free userspace_mem_regions. */
 	hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
 		__vm_mem_region_delete(vmp, region);
@@ -1286,6 +1342,11 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	TEST_ASSERT(vcpu->run != MAP_FAILED,
 		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
 
+	if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
+		vcpu->stats.fd = vcpu_get_stats_fd(vcpu);
+	else
+		vcpu->stats.fd = -1;
+
 	/* Add to linked-list of VCPUs. */
 	list_add(&vcpu->list, &vm->vcpus);
 
@@ -2198,46 +2259,31 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
 		    desc->name, size, ret);
 }
 
-/*
- * Read the data of the named stat
- *
- * Input Args:
- *   vm - the VM for which the stat should be read
- *   stat_name - the name of the stat to read
- *   max_elements - the maximum number of 8-byte values to read into data
- *
- * Output Args:
- *   data - the buffer into which stat data should be read
- *
- * Read the data values of a specified stat from the binary stats interface.
- */
-void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
-		   size_t max_elements)
+void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
+		  uint64_t *data, size_t max_elements)
 {
 	struct kvm_stats_desc *desc;
 	size_t size_desc;
 	int i;
 
-	if (!vm->stats_fd) {
-		vm->stats_fd = vm_get_stats_fd(vm);
-		read_stats_header(vm->stats_fd, &vm->stats_header);
-		vm->stats_desc = read_stats_descriptors(vm->stats_fd,
-							&vm->stats_header);
+	if (!stats->desc) {
+		read_stats_header(stats->fd, &stats->header);
+		stats->desc = read_stats_descriptors(stats->fd, &stats->header);
 	}
 
-	size_desc = get_stats_descriptor_size(&vm->stats_header);
+	size_desc = get_stats_descriptor_size(&stats->header);
 
-	for (i = 0; i < vm->stats_header.num_desc; ++i) {
-		desc = (void *)vm->stats_desc + (i * size_desc);
+	for (i = 0; i < stats->header.num_desc; ++i) {
+		desc = (void *)stats->desc + (i * size_desc);
 
-		if (strcmp(desc->name, stat_name))
+		if (strcmp(desc->name, name))
 			continue;
 
-		read_stat_data(vm->stats_fd, &vm->stats_header, desc,
-			       data, max_elements);
-
-		break;
+		read_stat_data(stats->fd, &stats->header, desc, data, max_elements);
+		return;
 	}
+
+	TEST_FAIL("Unable to find stat '%s'", name);
 }
 
 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
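
Note that kvm_get_stat() still reads the header and descriptors lazily on first use; only the FD itself is now opened eagerly at VM/vCPU creation. Underneath sits KVM's binary stats ABI: the FD returned by KVM_GET_STATS_FD exposes a header, a descriptor table, and a data block at header-specified offsets. A bare-bones sketch of the raw interface, error handling elided (`vcpu_fd` is assumed to be an open vCPU or VM file descriptor):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/kvm.h>

	static uint64_t read_first_stat(int vcpu_fd)
	{
		struct kvm_stats_header header;
		uint64_t value;
		int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);

		/* The kvm_stats_header sits at offset 0 of the stats FD. */
		pread(stats_fd, &header, sizeof(header), 0);

		/*
		 * Real code walks the descriptors at header.desc_offset to
		 * find each stat's name, type, and offset; for brevity this
		 * just reads the first 8 bytes of the data block.
		 */
		pread(stats_fd, &value, sizeof(value), header.data_offset);

		close(stats_fd);
		return value;
	}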

tools/testing/selftests/kvm/lib/userfaultfd_util.c

Lines changed: 1 addition & 1 deletion
@@ -114,7 +114,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
 
 	PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
 		       is_minor ? "MINOR" : "MISSING",
-		       is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY");
+		       is_minor ? "UFFDIO_CONTINUE" : "UFFDIO_COPY");
 
 	uffd_desc = malloc(sizeof(struct uffd_desc));
 	TEST_ASSERT(uffd_desc, "Failed to malloc uffd descriptor");

tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c

Lines changed: 3 additions & 3 deletions
@@ -41,9 +41,9 @@ struct kvm_page_stats {
 
 static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage)
 {
-	stats->pages_4k = vm_get_stat(vm, "pages_4k");
-	stats->pages_2m = vm_get_stat(vm, "pages_2m");
-	stats->pages_1g = vm_get_stat(vm, "pages_1g");
+	stats->pages_4k = vm_get_stat(vm, pages_4k);
+	stats->pages_2m = vm_get_stat(vm, pages_2m);
+	stats->pages_1g = vm_get_stat(vm, pages_1g);
 	stats->hugepages = stats->pages_2m + stats->pages_1g;
 
 	pr_debug("\nPage stats after %s: 4K: %ld 2M: %ld 1G: %ld huge: %ld\n",

tools/testing/selftests/kvm/x86/nx_huge_pages_test.c

Lines changed: 2 additions & 2 deletions
@@ -73,7 +73,7 @@ static void check_2m_page_count(struct kvm_vm *vm, int expected_pages_2m)
 {
 	int actual_pages_2m;
 
-	actual_pages_2m = vm_get_stat(vm, "pages_2m");
+	actual_pages_2m = vm_get_stat(vm, pages_2m);
 
 	TEST_ASSERT(actual_pages_2m == expected_pages_2m,
 		    "Unexpected 2m page count. Expected %d, got %d",
@@ -84,7 +84,7 @@ static void check_split_count(struct kvm_vm *vm, int expected_splits)
 {
 	int actual_splits;
 
-	actual_splits = vm_get_stat(vm, "nx_lpage_splits");
+	actual_splits = vm_get_stat(vm, nx_lpage_splits);
 
 	TEST_ASSERT(actual_splits == expected_splits,
 		    "Unexpected NX huge page split count. Expected %d, got %d",

tools/testing/selftests/kvm/x86/xapic_ipi_test.c

Lines changed: 13 additions & 0 deletions
@@ -466,6 +466,19 @@ int main(int argc, char *argv[])
 	cancel_join_vcpu_thread(threads[0], params[0].vcpu);
 	cancel_join_vcpu_thread(threads[1], params[1].vcpu);
 
+	/*
+	 * If the host supports Idle HLT, i.e. KVM *might* be using Idle HLT,
+	 * then the number of HLT exits may be less than the number of HLTs
+	 * that were executed, as Idle HLT elides the exit if the vCPU has an
+	 * unmasked, pending IRQ (or NMI).
+	 */
+	if (this_cpu_has(X86_FEATURE_IDLE_HLT))
+		TEST_ASSERT(data->hlt_count >= vcpu_get_stat(params[0].vcpu, halt_exits),
+			    "HLT insns = %lu, HLT exits = %lu",
+			    data->hlt_count, vcpu_get_stat(params[0].vcpu, halt_exits));
+	else
+		TEST_ASSERT_EQ(data->hlt_count, vcpu_get_stat(params[0].vcpu, halt_exits));
+
 	fprintf(stderr,
 		"Test successful after running for %d seconds.\n"
 		"Sending vCPU sent %lu IPIs to halting vCPU\n"
