Commit 26a0652

sean-jc authored and bonzini committed
KVM: x86: Disallow KVM_SET_SREGS{2} if incoming CR0 is invalid
Reject KVM_SET_SREGS{2} with -EINVAL if the incoming CR0 is invalid, e.g. due
to setting bits 63:32, illegal combinations, or to a value that isn't allowed
in VMX (non-)root mode.  The VMX checks in particular are "fun" as failure to
disallow Real Mode for an L2 that is configured with unrestricted guest
disabled, when KVM itself has unrestricted guest enabled, will result in KVM
forcing VM86 mode to virtualize Real Mode for L2, but then failing to unwind
the related metadata when synthesizing a nested VM-Exit back to L1 (which has
unrestricted guest enabled).

Opportunistically fix a benign typo in the prototype for is_valid_cr4().

Cc: stable@vger.kernel.org
Reported-by: syzbot+5feef0b9ee9c8e9e5689@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/000000000000f316b705fdf6e2b4@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20230613203037.1968489-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent: 8802183
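For context, a minimal userspace sketch of what this change means in practice
(illustrative only; it assumes an already-created vCPU fd, and the helper name
try_bogus_cr0 is invented here, not part of the commit): after this commit,
KVM_SET_SREGS with an architecturally invalid CR0, e.g. CR0.PG=1 with
CR0.PE=0, fails with -EINVAL rather than being accepted and corrupting vCPU
state later.

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative helper: vcpu_fd is an existing KVM vCPU file descriptor. */
    static int try_bogus_cr0(int vcpu_fd)
    {
        struct kvm_sregs sregs;

        if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs))
            return -errno;

        sregs.cr0 |= 1ull << 31;  /* set X86_CR0_PG */
        sregs.cr0 &= ~1ull;       /* clear X86_CR0_PE: illegal combination */

        /* Expected after this commit: failure with errno == EINVAL. */
        return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs) ? -errno : 0;
    }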

5 files changed: +52 -20

arch/x86/include/asm/kvm-x86-ops.h

Lines changed: 1 addition & 0 deletions
@@ -37,6 +37,7 @@ KVM_X86_OP(get_segment)
 KVM_X86_OP(get_cpl)
 KVM_X86_OP(set_segment)
 KVM_X86_OP(get_cs_db_l_bits)
+KVM_X86_OP(is_valid_cr0)
 KVM_X86_OP(set_cr0)
 KVM_X86_OP_OPTIONAL(post_set_cr3)
 KVM_X86_OP(is_valid_cr4)
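A hedged aside on how this table is consumed (a simplification; the exact
macro text in kvm_host.h and x86.c may differ in detail): the header is
included multiple times with KVM_X86_OP redefined, so each entry both
declares and defines a static call bound to the corresponding struct
kvm_x86_ops member.  That is what lets common x86 code invoke the new hook as
static_call(kvm_x86_is_valid_cr0)(vcpu, cr0), with the SVM or VMX
implementation patched in when the vendor module loads.  Roughly:

    /* Simplified sketch, not the kernel's verbatim macros. */
    #define KVM_X86_OP(func) \
        DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
                                *(((struct kvm_x86_ops *)0)->func));
    #include <asm/kvm-x86-ops.h>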

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 1 deletion
@@ -1566,9 +1566,10 @@ struct kvm_x86_ops {
 	void (*set_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
+	bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
-	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
+	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);

arch/x86/kvm/svm/svm.c

Lines changed: 6 additions & 0 deletions
@@ -1786,6 +1786,11 @@ static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	}
 }
 
+static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+	return true;
+}
+
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4809,6 +4814,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.set_segment = svm_set_segment,
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
+	.is_valid_cr0 = svm_is_valid_cr0,
 	.set_cr0 = svm_set_cr0,
 	.post_set_cr3 = sev_post_set_cr3,
 	.is_valid_cr4 = svm_is_valid_cr4,

arch/x86/kvm/vmx/vmx.c

Lines changed: 21 additions & 7 deletions
@@ -3047,6 +3047,15 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
 
+	/*
+	 * KVM should never use VM86 to virtualize Real Mode when L2 is active,
+	 * as using VM86 is unnecessary if unrestricted guest is enabled, and
+	 * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
+	 * should VM-Fail and KVM should reject userspace attempts to stuff
+	 * CR0.PG=0 when L2 is active.
+	 */
+	WARN_ON_ONCE(is_guest_mode(vcpu));
+
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
@@ -3236,6 +3245,17 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
 			  CPU_BASED_CR3_STORE_EXITING)
 
+static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+	if (is_guest_mode(vcpu))
+		return nested_guest_cr0_valid(vcpu, cr0);
+
+	if (to_vmx(vcpu)->nested.vmxon)
+		return nested_host_cr0_valid(vcpu, cr0);
+
+	return true;
+}
+
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5375,18 +5395,11 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 		val = (val & ~vmcs12->cr0_guest_host_mask) |
 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
 
-		if (!nested_guest_cr0_valid(vcpu, val))
-			return 1;
-
 		if (kvm_set_cr0(vcpu, val))
 			return 1;
 		vmcs_writel(CR0_READ_SHADOW, orig_val);
 		return 0;
 	} else {
-		if (to_vmx(vcpu)->nested.vmxon &&
-		    !nested_host_cr0_valid(vcpu, val))
-			return 1;
-
 		return kvm_set_cr0(vcpu, val);
 	}
 }
@@ -8214,6 +8227,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_segment = vmx_set_segment,
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+	.is_valid_cr0 = vmx_is_valid_cr0,
 	.set_cr0 = vmx_set_cr0,
 	.is_valid_cr4 = vmx_is_valid_cr4,
 	.set_cr4 = vmx_set_cr4,
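Note that the checks removed from handle_set_cr0() are not lost; they move
into vmx_is_valid_cr0(), which kvm_set_cr0() now consults on every path,
including KVM_SET_SREGS{2}.  Both nested_guest_cr0_valid() and
nested_host_cr0_valid() ultimately enforce the VMX fixed-bit rule derived
from the IA32_VMX_CR0_FIXED0/FIXED1 MSRs; a minimal restatement of that rule
(the helper name and standalone form are illustrative, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    /* VMX fixed-bit rule: every bit set in FIXED0 must be set in CR0, and
     * every bit clear in FIXED1 must be clear.  (For an L2 running with
     * unrestricted guest, KVM carves PE and PG out of the fixed0 mask.) */
    static bool cr0_fixed_bits_ok(uint64_t cr0, uint64_t fixed0, uint64_t fixed1)
    {
        return (cr0 & fixed0) == fixed0 && !(cr0 & ~fixed1);
    }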

arch/x86/kvm/x86.c

Lines changed: 22 additions & 12 deletions
@@ -906,6 +906,22 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 }
 EXPORT_SYMBOL_GPL(load_pdptrs);
 
+static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+#ifdef CONFIG_X86_64
+	if (cr0 & 0xffffffff00000000UL)
+		return false;
+#endif
+
+	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
+		return false;
+
+	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
+		return false;
+
+	return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0);
+}
+
 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
 {
 	/*
@@ -952,20 +968,13 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	unsigned long old_cr0 = kvm_read_cr0(vcpu);
 
-	cr0 |= X86_CR0_ET;
-
-#ifdef CONFIG_X86_64
-	if (cr0 & 0xffffffff00000000UL)
+	if (!kvm_is_valid_cr0(vcpu, cr0))
 		return 1;
-#endif
-
-	cr0 &= ~CR0_RESERVED_BITS;
 
-	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
-		return 1;
+	cr0 |= X86_CR0_ET;
 
-	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
-		return 1;
+	/* Write to CR0 reserved bits are ignored, even on Intel. */
+	cr0 &= ~CR0_RESERVED_BITS;
 
 #ifdef CONFIG_X86_64
 	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
@@ -11468,7 +11477,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 		return false;
 	}
 
-	return kvm_is_valid_cr4(vcpu, sregs->cr4);
+	return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
+	       kvm_is_valid_cr0(vcpu, sregs->cr0);
 }
 
 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
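To make the common checks concrete, here is a small self-contained
restatement of the invariants kvm_is_valid_cr0() enforces before deferring to
the vendor hook (a userspace sketch with the X86_CR0_* constants inlined; not
part of the commit):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define CR0_PE (1ull << 0)
    #define CR0_NW (1ull << 29)
    #define CR0_CD (1ull << 30)
    #define CR0_PG (1ull << 31)

    static bool cr0_arch_valid(uint64_t cr0)
    {
        if (cr0 >> 32)                          /* bits 63:32 are reserved */
            return false;
        if ((cr0 & CR0_NW) && !(cr0 & CR0_CD))  /* NW requires CD */
            return false;
        if ((cr0 & CR0_PG) && !(cr0 & CR0_PE))  /* paging requires protected mode */
            return false;
        return true;
    }

    int main(void)
    {
        assert(cr0_arch_valid(CR0_PE | CR0_PG));  /* protected mode + paging: OK */
        assert(!cr0_arch_valid(CR0_PG));          /* PG without PE: rejected */
        assert(!cr0_arch_valid(CR0_NW));          /* NW without CD: rejected */
        assert(!cr0_arch_valid(1ull << 40));      /* reserved high bit: rejected */
        return 0;
    }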
