
Commit 6e2b235

KVM: Grab vcpu->mutex across installing the vCPU's fd and bumping online_vcpus
During vCPU creation, acquire vcpu->mutex prior to exposing the vCPU to
userspace, and hold the mutex until online_vcpus is bumped, i.e. until the
vCPU is fully online from KVM's perspective.

To ensure asynchronous vCPU ioctls also wait for the vCPU to come online,
explicitly check online_vcpus at the start of kvm_vcpu_ioctl(), and take the
vCPU's mutex to wait if necessary (having to wait for any ioctl should be
exceedingly rare, i.e. not worth optimizing).

Reported-by: Will Deacon <will@kernel.org>
Reported-by: Michal Luczaj <mhal@rbox.co>
Link: https://lore.kernel.org/all/20240730155646.1687-1-will@kernel.org
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20241009150455.1057573-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
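
For illustration only (not part of the patch or the linked reports), a minimal userspace sketch of the window being closed could look like the following: a second thread guesses the vCPU's fd number and hammers a vCPU ioctl() while KVM_CREATE_VCPU is still in flight, i.e. after the fd is installed but before online_vcpus is bumped. The fd-guessing trick and the choice of KVM_RUN are assumptions made for the sketch, and error handling is omitted.

/*
 * Hypothetical sketch: race a vCPU ioctl() against KVM_CREATE_VCPU.
 * With this commit, the racing ioctl() waits on vcpu->mutex until the
 * vCPU is fully online instead of reaching KVM early.
 */
#include <fcntl.h>
#include <linux/kvm.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int guessed_vcpu_fd;	/* assumption: fds are handed out sequentially */

static void *hammer_vcpu_ioctls(void *arg)
{
	/* Spam a vCPU ioctl(); it fails with EBADF until the fd is installed. */
	for (;;)
		ioctl(guessed_vcpu_fd, KVM_RUN, 0);
	return NULL;
}

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	pthread_t thread;

	guessed_vcpu_fd = vm_fd + 1;
	pthread_create(&thread, NULL, hammer_vcpu_ioctls, NULL);

	/* The racy window is inside this call, before online_vcpus is bumped. */
	ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	pause();
	return 0;
}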
1 parent 0664dc7 commit 6e2b235

1 file changed: +46 −1 lines changed
virt/kvm/kvm_main.c

Lines changed: 46 additions & 1 deletion
@@ -4132,7 +4132,14 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 	if (r)
 		goto unlock_vcpu_destroy;
 
-	/* Now it's all set up, let userspace reach it */
+	/*
+	 * Now it's all set up, let userspace reach it.  Grab the vCPU's mutex
+	 * so that userspace can't invoke vCPU ioctl()s until the vCPU is fully
+	 * visible (per online_vcpus), e.g. so that KVM doesn't get tricked
+	 * into a NULL-pointer dereference because KVM thinks the _current_
+	 * vCPU doesn't exist.
+	 */
+	mutex_lock(&vcpu->mutex);
 	kvm_get_kvm(kvm);
 	r = create_vcpu_fd(vcpu);
 	if (r < 0)
@@ -4149,13 +4156,15 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 	 */
 	smp_wmb();
 	atomic_inc(&kvm->online_vcpus);
+	mutex_unlock(&vcpu->mutex);
 
 	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_postcreate(vcpu);
 	kvm_create_vcpu_debugfs(vcpu);
 	return r;
 
 kvm_put_xa_release:
+	mutex_unlock(&vcpu->mutex);
 	kvm_put_kvm_no_destroy(kvm);
 	xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
 unlock_vcpu_destroy:
@@ -4282,6 +4291,33 @@ static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
 }
 #endif
 
+static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	/*
+	 * In practice, this happy path will always be taken, as a well-behaved
+	 * VMM will never invoke a vCPU ioctl() before KVM_CREATE_VCPU returns.
+	 */
+	if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus)))
+		return 0;
+
+	/*
+	 * Acquire and release the vCPU's mutex to wait for vCPU creation to
+	 * complete (kvm_vm_ioctl_create_vcpu() holds the mutex until the vCPU
+	 * is fully online).
+	 */
+	if (mutex_lock_killable(&vcpu->mutex))
+		return -EINTR;
+
+	mutex_unlock(&vcpu->mutex);
+
+	if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx)))
+		return -EIO;
+
+	return 0;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -4297,6 +4333,15 @@ static long kvm_vcpu_ioctl(struct file *filp,
 	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
 		return -EINVAL;
 
+	/*
+	 * Wait for the vCPU to be online before handling the ioctl(), as KVM
+	 * assumes the vCPU is reachable via vcpu_array, i.e. may dereference
+	 * a NULL pointer if userspace invokes an ioctl() before KVM is ready.
+	 */
+	r = kvm_wait_for_vcpu_online(vcpu);
+	if (r)
+		return r;
+
 	/*
 	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
 	 * execution; mutex_lock() would break them.
