Skip to content

Commit 683b783

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini: "ARM: - Avoid dropping the page refcount twice when freeing an unlinked page-table subtree. - Don't source the VFIO Kconfig twice - Fix protected-mode locking order between kvm and vcpus RISC-V: - Fix steal-time related sparse warnings x86: - Cleanup gtod_is_based_on_tsc() to return "bool" instead of an "int" - Make a KVM_REQ_NMI request while handling KVM_SET_VCPU_EVENTS if and only if the incoming events->nmi.pending is non-zero. If the target vCPU is in the UNINITIALIZED state, the spurious request will result in KVM exiting to userspace, which in turn causes QEMU to constantly acquire and release QEMU's global mutex, to the point where the BSP is unable to make forward progress. - Fix a type (u8 versus u64) goof that results in pmu->fixed_ctr_ctrl being incorrectly truncated, and ultimately causes KVM to think a fixed counter has already been disabled (KVM thinks the old value is '0'). - Fix a stack leak in KVM_GET_MSRS where a failed MSR read from userspace that is ultimately ignored due to ignore_msrs=true doesn't zero the output as intended. Selftests cleanups and fixes: - Remove redundant newlines from error messages. - Delete an unused variable in the AMX test (which causes build failures when compiling with -Werror). - Fail instead of skipping tests if open(), e.g. of /dev/kvm, fails with an error code other than ENOENT (a Hyper-V selftest bug resulted in an EMFILE, and the test eventually got skipped). - Fix TSC related bugs in several Hyper-V selftests. - Fix a bug in the dirty ring logging test where a sem_post() could be left pending across multiple runs, resulting in incorrect synchronization between the main thread and the vCPU worker thread. - Relax the dirty log split test's assertions on 4KiB mappings to fix false positives due to the number of mappings for memslot 0 (used for code and data that is NOT being dirty logged) changing, e.g. 
due to NUMA balancing" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (25 commits) KVM: arm64: Fix double-free following kvm_pgtable_stage2_free_unlinked() RISC-V: KVM: Use correct restricted types RISC-V: paravirt: Use correct restricted types RISC-V: paravirt: steal_time should be static KVM: selftests: Don't assert on exact number of 4KiB in dirty log split test KVM: selftests: Fix a semaphore imbalance in the dirty ring logging test KVM: x86: Fix KVM_GET_MSRS stack info leak KVM: arm64: Do not source virt/lib/Kconfig twice KVM: x86/pmu: Fix type length error when reading pmu->fixed_ctr_ctrl KVM: x86: Make gtod_is_based_on_tsc() return 'bool' KVM: selftests: Make hyperv_clock require TSC based system clocksource KVM: selftests: Run clocksource dependent tests with hyperv_clocksource_tsc_page too KVM: selftests: Use generic sys_clocksource_is_tsc() in vmx_nested_tsc_scaling_test KVM: selftests: Generalize check_clocksource() from kvm_clock_test KVM: x86: make KVM_REQ_NMI request iff NMI pending for vcpu KVM: arm64: Fix circular locking dependency KVM: selftests: Fail tests when open() fails with !ENOENT KVM: selftests: Avoid infinite loop in hyperv_features when invtsc is missing KVM: selftests: Delete superfluous, unused "stage" variable in AMX test KVM: selftests: x86_64: Remove redundant newlines ...
2 parents 4b6f7c6 + 9895cee commit 683b783

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

65 files changed

+277
-276
lines changed

arch/arm64/kvm/Kconfig

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
# KVM configuration
44
#
55

6-
source "virt/lib/Kconfig"
76
source "virt/kvm/Kconfig"
87

98
menuconfig VIRTUALIZATION

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1419,7 +1419,6 @@ kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
14191419
level + 1);
14201420
if (ret) {
14211421
kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
1422-
mm_ops->put_page(pgtable);
14231422
return ERR_PTR(ret);
14241423
}
14251424

@@ -1502,7 +1501,6 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
15021501

15031502
if (!stage2_try_break_pte(ctx, mmu)) {
15041503
kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
1505-
mm_ops->put_page(childp);
15061504
return -EAGAIN;
15071505
}
15081506

arch/arm64/kvm/pkvm.c

Lines changed: 17 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,17 @@ void __init kvm_hyp_reserve(void)
101101
hyp_mem_base);
102102
}
103103

104+
static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
105+
{
106+
if (host_kvm->arch.pkvm.handle) {
107+
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
108+
host_kvm->arch.pkvm.handle));
109+
}
110+
111+
host_kvm->arch.pkvm.handle = 0;
112+
free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
113+
}
114+
104115
/*
105116
* Allocates and donates memory for hypervisor VM structs at EL2.
106117
*
@@ -181,7 +192,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
181192
return 0;
182193

183194
destroy_vm:
184-
pkvm_destroy_hyp_vm(host_kvm);
195+
__pkvm_destroy_hyp_vm(host_kvm);
185196
return ret;
186197
free_vm:
187198
free_pages_exact(hyp_vm, hyp_vm_sz);
@@ -194,23 +205,19 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
194205
{
195206
int ret = 0;
196207

197-
mutex_lock(&host_kvm->lock);
208+
mutex_lock(&host_kvm->arch.config_lock);
198209
if (!host_kvm->arch.pkvm.handle)
199210
ret = __pkvm_create_hyp_vm(host_kvm);
200-
mutex_unlock(&host_kvm->lock);
211+
mutex_unlock(&host_kvm->arch.config_lock);
201212

202213
return ret;
203214
}
204215

205216
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
206217
{
207-
if (host_kvm->arch.pkvm.handle) {
208-
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
209-
host_kvm->arch.pkvm.handle));
210-
}
211-
212-
host_kvm->arch.pkvm.handle = 0;
213-
free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
218+
mutex_lock(&host_kvm->arch.config_lock);
219+
__pkvm_destroy_hyp_vm(host_kvm);
220+
mutex_unlock(&host_kvm->arch.config_lock);
214221
}
215222

216223
int pkvm_init_host_vm(struct kvm *host_kvm)

arch/riscv/kernel/paravirt.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ static int __init parse_no_stealacc(char *arg)
4141

4242
early_param("no-steal-acc", parse_no_stealacc);
4343

44-
DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
44+
static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
4545

4646
static bool __init has_pv_steal_clock(void)
4747
{
@@ -91,8 +91,8 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
9191
static u64 pv_time_steal_clock(int cpu)
9292
{
9393
struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
94-
u32 sequence;
95-
u64 steal;
94+
__le32 sequence;
95+
__le64 steal;
9696

9797
/*
9898
* Check the sequence field before and after reading the steal

arch/riscv/kvm/vcpu_sbi_sta.c

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,12 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
2626
{
2727
gpa_t shmem = vcpu->arch.sta.shmem;
2828
u64 last_steal = vcpu->arch.sta.last_steal;
29-
u32 *sequence_ptr, sequence;
30-
u64 *steal_ptr, steal;
29+
__le32 __user *sequence_ptr;
30+
__le64 __user *steal_ptr;
31+
__le32 sequence_le;
32+
__le64 steal_le;
33+
u32 sequence;
34+
u64 steal;
3135
unsigned long hva;
3236
gfn_t gfn;
3337

@@ -47,22 +51,22 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
4751
return;
4852
}
4953

50-
sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
54+
sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
5155
offsetof(struct sbi_sta_struct, sequence));
52-
steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
56+
steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
5357
offsetof(struct sbi_sta_struct, steal));
5458

55-
if (WARN_ON(get_user(sequence, sequence_ptr)))
59+
if (WARN_ON(get_user(sequence_le, sequence_ptr)))
5660
return;
5761

58-
sequence = le32_to_cpu(sequence);
62+
sequence = le32_to_cpu(sequence_le);
5963
sequence += 1;
6064

6165
if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
6266
return;
6367

64-
if (!WARN_ON(get_user(steal, steal_ptr))) {
65-
steal = le64_to_cpu(steal);
68+
if (!WARN_ON(get_user(steal_le, steal_ptr))) {
69+
steal = le64_to_cpu(steal_le);
6670
vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
6771
steal += vcpu->arch.sta.last_steal - last_steal;
6872
WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ static int fixed_pmc_events[] = {
7171
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
7272
{
7373
struct kvm_pmc *pmc;
74-
u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
74+
u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
7575
int i;
7676

7777
pmu->fixed_ctr_ctrl = data;

arch/x86/kvm/x86.c

Lines changed: 8 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1704,22 +1704,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
17041704
struct kvm_msr_entry msr;
17051705
int r;
17061706

1707+
/* Unconditionally clear the output for simplicity */
1708+
msr.data = 0;
17071709
msr.index = index;
17081710
r = kvm_get_msr_feature(&msr);
17091711

1710-
if (r == KVM_MSR_RET_INVALID) {
1711-
/* Unconditionally clear the output for simplicity */
1712-
*data = 0;
1713-
if (kvm_msr_ignored_check(index, 0, false))
1714-
r = 0;
1715-
}
1716-
1717-
if (r)
1718-
return r;
1712+
if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
1713+
r = 0;
17191714

17201715
*data = msr.data;
17211716

1722-
return 0;
1717+
return r;
17231718
}
17241719

17251720
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -2511,7 +2506,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
25112506
}
25122507

25132508
#ifdef CONFIG_X86_64
2514-
static inline int gtod_is_based_on_tsc(int mode)
2509+
static inline bool gtod_is_based_on_tsc(int mode)
25152510
{
25162511
return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
25172512
}
@@ -5458,7 +5453,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
54585453
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
54595454
vcpu->arch.nmi_pending = 0;
54605455
atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
5461-
kvm_make_request(KVM_REQ_NMI, vcpu);
5456+
if (events->nmi.pending)
5457+
kvm_make_request(KVM_REQ_NMI, vcpu);
54625458
}
54635459
static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
54645460

tools/testing/selftests/kvm/aarch64/arch_timer.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -248,7 +248,7 @@ static void *test_vcpu_run(void *arg)
248248
REPORT_GUEST_ASSERT(uc);
249249
break;
250250
default:
251-
TEST_FAIL("Unexpected guest exit\n");
251+
TEST_FAIL("Unexpected guest exit");
252252
}
253253

254254
return NULL;
@@ -287,7 +287,7 @@ static int test_migrate_vcpu(unsigned int vcpu_idx)
287287

288288
/* Allow the error where the vCPU thread is already finished */
289289
TEST_ASSERT(ret == 0 || ret == ESRCH,
290-
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
290+
"Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
291291
vcpu_idx, new_pcpu, ret);
292292

293293
return ret;
@@ -326,12 +326,12 @@ static void test_run(struct kvm_vm *vm)
326326

327327
pthread_mutex_init(&vcpu_done_map_lock, NULL);
328328
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
329-
TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");
329+
TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");
330330

331331
for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
332332
ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
333333
(void *)(unsigned long)i);
334-
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
334+
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
335335
}
336336

337337
/* Spawn a thread to control the vCPU migrations */
@@ -340,7 +340,7 @@ static void test_run(struct kvm_vm *vm)
340340

341341
ret = pthread_create(&pt_vcpu_migration, NULL,
342342
test_vcpu_migration, NULL);
343-
TEST_ASSERT(!ret, "Failed to create the migration pthread\n");
343+
TEST_ASSERT(!ret, "Failed to create the migration pthread");
344344
}
345345

346346

@@ -384,7 +384,7 @@ static struct kvm_vm *test_vm_create(void)
384384
if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
385385
vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
386386
else
387-
TEST_FAIL("no support for global offset\n");
387+
TEST_FAIL("no support for global offset");
388388
}
389389

390390
for (i = 0; i < nr_vcpus; i++)

tools/testing/selftests/kvm/aarch64/hypercalls.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -175,18 +175,18 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
175175
/* First 'read' should be an upper limit of the features supported */
176176
vcpu_get_reg(vcpu, reg_info->reg, &val);
177177
TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
178-
"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
178+
"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
179179
reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
180180

181181
/* Test a 'write' by disabling all the features of the register map */
182182
ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
183183
TEST_ASSERT(ret == 0,
184-
"Failed to clear all the features of reg: 0x%lx; ret: %d\n",
184+
"Failed to clear all the features of reg: 0x%lx; ret: %d",
185185
reg_info->reg, errno);
186186

187187
vcpu_get_reg(vcpu, reg_info->reg, &val);
188188
TEST_ASSERT(val == 0,
189-
"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
189+
"Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);
190190

191191
/*
192192
* Test enabling a feature that's not supported.
@@ -195,7 +195,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
195195
if (reg_info->max_feat_bit < 63) {
196196
ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
197197
TEST_ASSERT(ret != 0 && errno == EINVAL,
198-
"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
198+
"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx",
199199
errno, reg_info->reg);
200200
}
201201
}
@@ -216,7 +216,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
216216
*/
217217
vcpu_get_reg(vcpu, reg_info->reg, &val);
218218
TEST_ASSERT(val == 0,
219-
"Expected all the features to be cleared for reg: 0x%lx\n",
219+
"Expected all the features to be cleared for reg: 0x%lx",
220220
reg_info->reg);
221221

222222
/*
@@ -226,7 +226,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
226226
*/
227227
ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
228228
TEST_ASSERT(ret != 0 && errno == EBUSY,
229-
"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
229+
"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx",
230230
errno, reg_info->reg);
231231
}
232232
}
@@ -265,7 +265,7 @@ static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
265265
case TEST_STAGE_HVC_IFACE_FALSE_INFO:
266266
break;
267267
default:
268-
TEST_FAIL("Unknown test stage: %d\n", prev_stage);
268+
TEST_FAIL("Unknown test stage: %d", prev_stage);
269269
}
270270
}
271271

@@ -294,7 +294,7 @@ static void test_run(void)
294294
REPORT_GUEST_ASSERT(uc);
295295
break;
296296
default:
297-
TEST_FAIL("Unexpected guest exit\n");
297+
TEST_FAIL("Unexpected guest exit");
298298
}
299299
}
300300

tools/testing/selftests/kvm/aarch64/page_fault_test.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -414,10 +414,10 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm,
414414
if (fd != -1) {
415415
ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
416416
0, paging_size);
417-
TEST_ASSERT(ret == 0, "fallocate failed\n");
417+
TEST_ASSERT(ret == 0, "fallocate failed");
418418
} else {
419419
ret = madvise(hva, paging_size, MADV_DONTNEED);
420-
TEST_ASSERT(ret == 0, "madvise failed\n");
420+
TEST_ASSERT(ret == 0, "madvise failed");
421421
}
422422

423423
return true;
@@ -501,7 +501,7 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
501501

502502
void fail_vcpu_run_no_handler(int ret)
503503
{
504-
TEST_FAIL("Unexpected vcpu run failure\n");
504+
TEST_FAIL("Unexpected vcpu run failure");
505505
}
506506

507507
void fail_vcpu_run_mmio_no_syndrome_handler(int ret)

0 commit comments

Comments
 (0)