
Commit b6262dd

Merge branch 'kvm-6.15-rc2-fixes' into HEAD

2 parents: 39c5722 + bc52ae0

8 files changed, +52 -20 lines

arch/x86/include/asm/kvm_host.h

Lines changed: 6 additions & 1 deletion

@@ -1472,8 +1472,13 @@ struct kvm_arch {
 	struct once nx_once;
 
 #ifdef CONFIG_X86_64
-	/* The number of TDP MMU pages across all roots. */
+#ifdef CONFIG_KVM_PROVE_MMU
+	/*
+	 * The number of TDP MMU pages across all roots. Used only to sanity
+	 * check that KVM isn't leaking TDP MMU pages.
+	 */
 	atomic64_t tdp_mmu_pages;
+#endif
 
 	/*
 	 * List of struct kvm_mmu_pages being used as roots.
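With CONFIG_KVM_PROVE_MMU disabled, tdp_mmu_pages now disappears from struct kvm_arch entirely, dropping both the 8-byte field and the atomic accounting on every TDP MMU page. A minimal sketch of an alternative shape for the same change, using a hypothetical helper name (the commit instead gates each call site directly, as in tdp_mmu.c below):

/*
 * Hypothetical helper, not from the commit: centralize the #ifdef so
 * call sites stay clean.  Compiles to nothing without KVM_PROVE_MMU.
 */
#ifdef CONFIG_KVM_PROVE_MMU
static inline void tdp_mmu_adjust_page_count(struct kvm *kvm, s64 delta)
{
	atomic64_add(delta, &kvm->arch.tdp_mmu_pages);
}
#else
static inline void tdp_mmu_adjust_page_count(struct kvm *kvm, s64 delta) { }
#endif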

arch/x86/kvm/cpuid.c

Lines changed: 3 additions & 5 deletions

@@ -1427,8 +1427,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		}
 		break;
 	case 0xa: { /* Architectural Performance Monitoring */
-		union cpuid10_eax eax;
-		union cpuid10_edx edx;
+		union cpuid10_eax eax = { };
+		union cpuid10_edx edx = { };
 
 		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
 			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
@@ -1444,8 +1444,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 
 		if (kvm_pmu_cap.version)
 			edx.split.anythread_deprecated = 1;
-		edx.split.reserved1 = 0;
-		edx.split.reserved2 = 0;
 
 		entry->eax = eax.full;
 		entry->ebx = kvm_pmu_cap.events_mask;
@@ -1763,7 +1761,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		break;
 	/* AMD Extended Performance Monitoring and Debug */
 	case 0x80000022: {
-		union cpuid_0x80000022_ebx ebx;
+		union cpuid_0x80000022_ebx ebx = { };
 
 		entry->ecx = entry->edx = 0;
 		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
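The empty initializer "= { }" zero-fills each union, so every bit, reserved fields included, starts cleared; that is what makes the explicit reserved1/reserved2 stores in the second hunk removable. A standalone sketch of the semantics, using a made-up union demo in place of cpuid10_edx:

#include <stdio.h>

/* Stand-in for cpuid10_edx: one full register overlaid with bitfields. */
union demo {
	unsigned int full;
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
};

int main(void)
{
	union demo edx = { };	/* zero-fills the whole union */

	edx.split.num_counters_fixed = 3;
	printf("full = %#x\n", edx.full);	/* 0x3 on a typical LE ABI */
	return 0;
}

(The empty-brace form is a GNU extension, standardized only in C23; the kernel builds with it enabled.)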

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 7 additions & 1 deletion

@@ -40,7 +40,9 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 	kvm_tdp_mmu_invalidate_roots(kvm, KVM_VALID_ROOTS);
 	kvm_tdp_mmu_zap_invalidated_roots(kvm, false);
 
-	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+#ifdef CONFIG_KVM_PROVE_MMU
+	KVM_MMU_WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+#endif
 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 
 	/*
@@ -325,13 +327,17 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	kvm_account_pgtable_pages((void *)sp->spt, +1);
+#ifdef CONFIG_KVM_PROVE_MMU
 	atomic64_inc(&kvm->arch.tdp_mmu_pages);
+#endif
 }
 
 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	kvm_account_pgtable_pages((void *)sp->spt, -1);
+#ifdef CONFIG_KVM_PROVE_MMU
 	atomic64_dec(&kvm->arch.tdp_mmu_pages);
+#endif
 }
 
 /**
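Note the first hunk switches WARN_ON() to KVM_MMU_WARN_ON(), which already compiles down to a stub without CONFIG_KVM_PROVE_MMU, yet every reference still needs the explicit #ifdef because the tdp_mmu_pages field itself no longer exists in that configuration. A sketch of how the macro is defined in arch/x86/kvm/mmu/mmu_internal.h (from memory, so details may differ):

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif

BUILD_BUG_ON_INVALID() evaluates nothing at runtime but still requires its argument to be a valid expression, hence the #ifdef around the call site.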

arch/x86/kvm/x86.c

Lines changed: 4 additions & 0 deletions

@@ -11786,6 +11786,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 	if (kvm_mpx_supported())
 		kvm_load_guest_fpu(vcpu);
 
+	kvm_vcpu_srcu_read_lock(vcpu);
+
 	r = kvm_apic_accept_events(vcpu);
 	if (r < 0)
 		goto out;
@@ -11799,6 +11801,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 	mp_state->mp_state = vcpu->arch.mp_state;
 
 out:
+	kvm_vcpu_srcu_read_unlock(vcpu);
+
 	if (kvm_mpx_supported())
 		kvm_put_guest_fpu(vcpu);
 	vcpu_put(vcpu);
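KVM_GET_MP_STATE now runs inside an SRCU read-side critical section, presumably because kvm_apic_accept_events() can reach code that dereferences SRCU-protected state (such as the memslots); placing the unlock at the out: label keeps the error paths balanced. The kvm_vcpu_srcu_read_lock/unlock() helpers wrap the underlying SRCU pattern, a minimal sketch of which is:

int idx;

idx = srcu_read_lock(&kvm->srcu);
/* ... safely dereference SRCU-protected data, e.g. kvm_memslots(kvm) ... */
srcu_read_unlock(&kvm->srcu, idx);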

include/linux/kvm_host.h

Lines changed: 1 addition & 1 deletion

@@ -2382,7 +2382,7 @@ static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
 struct kvm_vcpu *kvm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
 bool kvm_arch_has_irq_bypass(void);
 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
 			   struct irq_bypass_producer *);
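This conversion matters because HAVE_KVM_IRQ_BYPASS becomes tristate in virt/kvm/Kconfig below. Kconfig defines CONFIG_FOO only for =y and CONFIG_FOO_MODULE for =m, so a plain #ifdef silently compiles the code out of =m builds, while IS_ENABLED() covers both. A minimal illustration:

#include <linux/kconfig.h>

/*
 * For a tristate option FOO, Kconfig emits:
 *   FOO=y -> #define CONFIG_FOO 1
 *   FOO=m -> #define CONFIG_FOO_MODULE 1
 *   FOO=n -> nothing
 */
#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)	/* true for =y and =m */
/* declarations built in both cases */
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS		/* true only for =y */
/* would vanish from =m builds */
#endif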

tools/testing/selftests/kvm/rseq_test.c

Lines changed: 25 additions & 6 deletions

@@ -196,25 +196,27 @@ static void calc_min_max_cpu(void)
 static void help(const char *name)
 {
 	puts("");
-	printf("usage: %s [-h] [-u]\n", name);
+	printf("usage: %s [-h] [-u] [-l latency]\n", name);
 	printf(" -u: Don't sanity check the number of successful KVM_RUNs\n");
+	printf(" -l: Set /dev/cpu_dma_latency to suppress deep sleep states\n");
 	puts("");
 	exit(0);
 }
 
 int main(int argc, char *argv[])
 {
+	int r, i, snapshot, opt, fd = -1, latency = -1;
 	bool skip_sanity_check = false;
-	int r, i, snapshot;
 	struct kvm_vm *vm;
 	struct kvm_vcpu *vcpu;
 	u32 cpu, rseq_cpu;
-	int opt;
 
-	while ((opt = getopt(argc, argv, "hu")) != -1) {
+	while ((opt = getopt(argc, argv, "hl:u")) != -1) {
 		switch (opt) {
 		case 'u':
 			skip_sanity_check = true;
+		case 'l':
+			latency = atoi_paranoid(optarg);
 			break;
 		case 'h':
 		default:
@@ -243,6 +245,20 @@ int main(int argc, char *argv[])
 	pthread_create(&migration_thread, NULL, migration_worker,
 		       (void *)(unsigned long)syscall(SYS_gettid));
 
+	if (latency >= 0) {
+		/*
+		 * Writes to cpu_dma_latency persist only while the file is
+		 * open, i.e. it allows userspace to provide guaranteed latency
+		 * while running a workload.  Keep the file open until the test
+		 * completes, otherwise writing cpu_dma_latency is meaningless.
+		 */
+		fd = open("/dev/cpu_dma_latency", O_RDWR);
+		TEST_ASSERT(fd >= 0, __KVM_SYSCALL_ERROR("open() /dev/cpu_dma_latency", fd));
+
+		r = write(fd, &latency, 4);
+		TEST_ASSERT(r >= 1, "Error setting /dev/cpu_dma_latency");
+	}
+
 	for (i = 0; !done; i++) {
 		vcpu_run(vcpu);
 		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
@@ -278,6 +294,9 @@ int main(int argc, char *argv[])
 			    "rseq CPU = %d, sched CPU = %d", rseq_cpu, cpu);
 	}
 
+	if (fd > 0)
+		close(fd);
+
 	/*
 	 * Sanity check that the test was able to enter the guest a reasonable
 	 * number of times, e.g. didn't get stalled too often/long waiting for
@@ -293,8 +312,8 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(skip_sanity_check || i > (NR_TASK_MIGRATIONS / 2),
 		    "Only performed %d KVM_RUNs, task stalled too much?\n\n"
 		    "  Try disabling deep sleep states to reduce CPU wakeup latency,\n"
-		    "  e.g. via cpuidle.off=1 or setting /dev/cpu_dma_latency to '0',\n"
-		    "  or run with -u to disable this sanity check.", i);
+		    "  e.g. via cpuidle.off=1 or via -l <latency>, or run with -u to\n"
+		    "  disable this sanity check.", i);
 
 	pthread_join(migration_thread, NULL);
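/dev/cpu_dma_latency is the PM QoS interface: a process writes a 32-bit binary microsecond value (hence write(fd, &latency, 4)), and the kernel honors that wakeup-latency bound only for as long as the file descriptor stays open, which is why the test keeps fd open across the run loop and closes it only after the migrations finish. Running the test as ./rseq_test -l 0 (hypothetical invocation; the path depends on the build) should therefore hold the system out of deep C-states for the duration of the test.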

virt/kvm/Kconfig

Lines changed: 1 addition & 1 deletion

@@ -75,7 +75,7 @@ config KVM_COMPAT
        depends on KVM && COMPAT && !(S390 || ARM64 || RISCV)
 
 config HAVE_KVM_IRQ_BYPASS
-       bool
+       tristate
        select IRQ_BYPASS_MANAGER
 
 config HAVE_KVM_VCPU_ASYNC_IOCTL
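Flipping HAVE_KVM_IRQ_BYPASS from bool to tristate changes how the select behaves: a select from a tristate symbol propagates =m as a lower bound, so IRQ_BYPASS_MANAGER (itself tristate) can now be built as a module instead of being forced built-in, presumably tracking whether KVM itself is =y or =m. The IS_ENABLED() conversions in kvm_host.h above and eventfd.c below are what keep the C code compiled for both values.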

virt/kvm/eventfd.c

Lines changed: 5 additions & 5 deletions

@@ -149,7 +149,7 @@ irqfd_shutdown(struct work_struct *work)
 	/*
 	 * It is now safe to release the object's resources
 	 */
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
 	irq_bypass_unregister_consumer(&irqfd->consumer);
 #endif
 	eventfd_ctx_put(irqfd->eventfd);
@@ -274,7 +274,7 @@ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
 	write_seqcount_end(&irqfd->irq_entry_sc);
 }
 
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
 void __attribute__((weak)) kvm_arch_irq_bypass_stop(
 				struct irq_bypass_consumer *cons)
 {
@@ -424,7 +424,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	if (events & EPOLLIN)
 		schedule_work(&irqfd->inject);
 
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
 	if (kvm_arch_has_irq_bypass()) {
 		irqfd->consumer.token = (void *)irqfd->eventfd;
 		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
@@ -609,14 +609,14 @@ void kvm_irq_routing_update(struct kvm *kvm)
 	spin_lock_irq(&kvm->irqfds.lock);
 
 	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
 		/* Under irqfds.lock, so can read irq_entry safely */
 		struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
 #endif
 
 		irqfd_update(kvm, irqfd);
 
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
 		if (irqfd->producer &&
 		    kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
 			int ret = kvm_arch_update_irqfd_routing(