Commit c9cd0be

Merge tag 'kvm-x86-misc-6.9' of https://github.com/kvm-x86/linux into HEAD

KVM x86 misc changes for 6.9:

 - Explicitly initialize a variety of on-stack variables in the emulator that
   triggered KMSAN false positives (though in fairness to KMSAN, it's comically
   difficult to see that the uninitialized memory is never truly consumed).

 - Fix the debugregs ABI for 32-bit KVM, and clean up code related to reading
   DR6 and DR7.

 - Rework the "force immediate exit" code so that vendor code ultimately
   decides how and when to force the exit.  This allows VMX to further optimize
   handling preemption timer exits, and allows SVM to avoid sending a duplicate
   IPI (SVM also has a need to force an exit).

 - Fix a long-standing bug where kvm_has_noapic_vcpu could be left elevated if
   vCPU creation ultimately failed, and add a WARN to guard against similar
   bugs.

 - Provide a dedicated arch hook for checking if a different vCPU was
   in-kernel (for directed yield), and simplify the logic for checking if the
   currently loaded vCPU is in-kernel.

 - Misc cleanups and fixes.
2 parents 507e72f + 78ccfce commit c9cd0be

File tree

15 files changed: +184 -177 lines changed


arch/x86/include/asm/kvm-x86-ops.h

Lines changed: 0 additions & 1 deletion
@@ -103,7 +103,6 @@ KVM_X86_OP(write_tsc_multiplier)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
-KVM_X86_OP(request_immediate_exit)
 KVM_X86_OP(sched_in)
 KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
 KVM_X86_OP_OPTIONAL(vcpu_blocking)

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 5 deletions
@@ -1665,7 +1665,8 @@ struct kvm_x86_ops {
        void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
 
        int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
-       enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu);
+       enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
+                                                 bool force_immediate_exit);
        int (*handle_exit)(struct kvm_vcpu *vcpu,
                enum exit_fastpath_completion exit_fastpath);
        int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
@@ -1733,8 +1734,6 @@ struct kvm_x86_ops {
                                    struct x86_exception *exception);
        void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
 
-       void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
-
        void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);
 
        /*
@@ -2047,7 +2046,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
-void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
+unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
@@ -2240,7 +2239,6 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
 
 void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
                                     u32 size);
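To illustrate the vcpu_run signature change above, here is a minimal sketch of how a vendor .vcpu_run implementation might consume the new force_immediate_exit parameter in place of the removed request_immediate_exit hook. The function name and body are assumptions for illustration only, not code from this series.

/*
 * Illustrative sketch only (not from this commit): the shape of a vendor
 * .vcpu_run hook after the signature change.  How the immediate exit is
 * actually forced (e.g. a zero-deadline preemption timer on VMX, or a
 * self-IPI on SVM) is vendor-specific and not reproduced here.
 */
static enum exit_fastpath_completion example_vcpu_run(struct kvm_vcpu *vcpu,
                                                      bool force_immediate_exit)
{
        if (force_immediate_exit) {
                /*
                 * Vendor code arms its own mechanism so the vCPU exits
                 * immediately after VM-Enter.
                 */
        }

        /* ... enter the guest and handle any fastpath exits ... */

        return EXIT_FASTPATH_NONE;
}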

arch/x86/include/asm/svm.h

Lines changed: 4 additions & 4 deletions
@@ -358,10 +358,10 @@ struct sev_es_save_area {
        struct vmcb_seg ldtr;
        struct vmcb_seg idtr;
        struct vmcb_seg tr;
-       u64 vmpl0_ssp;
-       u64 vmpl1_ssp;
-       u64 vmpl2_ssp;
-       u64 vmpl3_ssp;
+       u64 pl0_ssp;
+       u64 pl1_ssp;
+       u64 pl2_ssp;
+       u64 pl3_ssp;
        u64 u_cet;
        u8 reserved_0xc8[2];
        u8 vmpl;

arch/x86/kvm/emulate.c

Lines changed: 19 additions & 26 deletions
@@ -1820,22 +1820,22 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
        return X86EMUL_CONTINUE;
 }
 
-static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
+static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len)
 {
        struct segmented_address addr;
 
-       rsp_increment(ctxt, -bytes);
+       rsp_increment(ctxt, -len);
        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;
 
-       return segmented_write(ctxt, addr, data, bytes);
+       return segmented_write(ctxt, addr, data, len);
 }
 
 static int em_push(struct x86_emulate_ctxt *ctxt)
 {
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
-       return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
+       return emulate_push(ctxt, &ctxt->src.val, ctxt->op_bytes);
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1863,7 +1863,8 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
                        void *dest, int len)
 {
        int rc;
-       unsigned long val, change_mask;
+       unsigned long val = 0;
+       unsigned long change_mask;
        int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
        int cpl = ctxt->ops->cpl(ctxt);
 
@@ -1920,7 +1921,7 @@ static int em_enter(struct x86_emulate_ctxt *ctxt)
                return X86EMUL_UNHANDLEABLE;
 
        rbp = reg_read(ctxt, VCPU_REGS_RBP);
-       rc = push(ctxt, &rbp, stack_size(ctxt));
+       rc = emulate_push(ctxt, &rbp, stack_size(ctxt));
        if (rc != X86EMUL_CONTINUE)
                return rc;
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
@@ -1954,7 +1955,7 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
 {
        int seg = ctxt->src2.val;
-       unsigned long selector;
+       unsigned long selector = 0;
        int rc;
 
        rc = emulate_pop(ctxt, &selector, 2);
@@ -2000,7 +2001,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 {
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RDI;
-       u32 val;
+       u32 val = 0;
 
        while (reg >= VCPU_REGS_RAX) {
                if (reg == VCPU_REGS_RSP) {
@@ -2229,7 +2230,7 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
 static int em_ret(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
-       unsigned long eip;
+       unsigned long eip = 0;
 
        rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
@@ -2241,7 +2242,8 @@ static int em_ret(struct x86_emulate_ctxt *ctxt)
 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
-       unsigned long eip, cs;
+       unsigned long eip = 0;
+       unsigned long cs = 0;
        int cpl = ctxt->ops->cpl(ctxt);
        struct desc_struct new_desc;
 
@@ -3011,7 +3013,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
                ret = em_push(ctxt);
        }
 
-       ops->get_dr(ctxt, 7, &dr7);
+       dr7 = ops->get_dr(ctxt, 7);
        ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
 
        return ret;
@@ -3184,7 +3186,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
-       unsigned long eip;
+       unsigned long eip = 0;
 
        rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
@@ -3866,15 +3868,6 @@ static int check_cr_access(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
-static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
-{
-       unsigned long dr7;
-
-       ctxt->ops->get_dr(ctxt, 7, &dr7);
-
-       return dr7 & DR7_GD;
-}
-
 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
 {
        int dr = ctxt->modrm_reg;
@@ -3887,10 +3880,10 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
        if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
                return emulate_ud(ctxt);
 
-       if (check_dr7_gd(ctxt)) {
+       if (ctxt->ops->get_dr(ctxt, 7) & DR7_GD) {
                ulong dr6;
 
-               ctxt->ops->get_dr(ctxt, 6, &dr6);
+               dr6 = ctxt->ops->get_dr(ctxt, 6);
                dr6 &= ~DR_TRAP_BITS;
                dr6 |= DR6_BD | DR6_ACTIVE_LOW;
                ctxt->ops->set_dr(ctxt, 6, dr6);
@@ -4505,11 +4498,11 @@ static const struct instr_dual instr_dual_0f_38_f1 = {
 };
 
 static const struct gprefix three_byte_0f_38_f0 = {
-       ID(0, &instr_dual_0f_38_f0), N, N, N
+       ID(0, &instr_dual_0f_38_f0), ID(0, &instr_dual_0f_38_f0), N, N
 };
 
 static const struct gprefix three_byte_0f_38_f1 = {
-       ID(0, &instr_dual_0f_38_f1), N, N, N
+       ID(0, &instr_dual_0f_38_f1), ID(0, &instr_dual_0f_38_f1), N, N
 };
 
 /*
@@ -5449,7 +5442,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
                ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
                break;
        case 0x21: /* mov from dr to reg */
-               ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
+               ctxt->dst.val = ops->get_dr(ctxt, ctxt->modrm_reg);
                break;
        case 0x40 ... 0x4f: /* cmov */
                if (test_cc(ctxt->b, ctxt->eflags))

arch/x86/kvm/kvm_emulate.h

Lines changed: 1 addition & 1 deletion
@@ -203,7 +203,7 @@ struct x86_emulate_ops {
        ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
        int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
        int (*cpl)(struct x86_emulate_ctxt *ctxt);
-       void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
+       ulong (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr);
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
        int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
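The x86.c side of this op change is not part of this excerpt, so the following is a hedged sketch under that assumption: the callback backing ->get_dr() can now simply return the value of kvm_get_dr() instead of writing it through a pointer. The exact names are taken from context and should be treated as assumptions.

/*
 * Sketch of the callback that backs the ->get_dr() op after the signature
 * change above; the corresponding x86.c hunk is not shown here, so the
 * helper names are assumptions for illustration.
 */
static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr)
{
        return kvm_get_dr(emul_to_vcpu(ctxt), dr);
}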

arch/x86/kvm/lapic.c

Lines changed: 26 additions & 1 deletion
@@ -124,6 +124,9 @@ static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
        return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
 }
 
+__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
+EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
+
 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
 
@@ -2466,8 +2469,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (!vcpu->arch.apic)
+       if (!vcpu->arch.apic) {
+               static_branch_dec(&kvm_has_noapic_vcpu);
                return;
+       }
 
        hrtimer_cancel(&apic->lapic_timer.timer);
 
@@ -2809,6 +2814,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
 
        ASSERT(vcpu != NULL);
 
+       if (!irqchip_in_kernel(vcpu->kvm)) {
+               static_branch_inc(&kvm_has_noapic_vcpu);
+               return 0;
+       }
+
        apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
        if (!apic)
                goto nomem;
@@ -2844,6 +2854,21 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
        static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
        kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
 
+       /*
+        * Defer evaluating inhibits until the vCPU is first run, as this vCPU
+        * will not get notified of any changes until this vCPU is visible to
+        * other vCPUs (marked online and added to the set of vCPUs).
+        *
+        * Opportunistically mark APICv active as VMX in particular is highly
+        * unlikely to have inhibits.  Ignore the current per-VM APICv state so
+        * that vCPU creation is guaranteed to run with a deterministic value,
+        * the request will ensure the vCPU gets the correct state before VM-Entry.
+        */
+       if (enable_apicv) {
+               apic->apicv_active = true;
+               kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+       }
+
        return 0;
 nomem_free_apic:
        kfree(apic);
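As a usage note, here is a minimal sketch of how a hot path elsewhere in KVM can key off the kvm_has_noapic_vcpu static key declared above, so the common case of every vCPU having an in-kernel APIC stays branch-free. The helper name is hypothetical and shown only for illustration.

/*
 * Illustrative only: a hypothetical helper consulting the static key.  The
 * key is elevated only while at least one APIC-less vCPU exists, so the
 * extra NULL check is patched out in the common case.
 */
static bool example_lapic_in_kernel(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_has_noapic_vcpu))
                return vcpu->arch.apic != NULL;
        return true;
}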

arch/x86/kvm/smm.c

Lines changed: 4 additions & 11 deletions
@@ -184,7 +184,6 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
                                    struct kvm_smram_state_32 *smram)
 {
        struct desc_ptr dt;
-       unsigned long val;
        int i;
 
        smram->cr0 = kvm_read_cr0(vcpu);
@@ -195,10 +194,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
        for (i = 0; i < 8; i++)
                smram->gprs[i] = kvm_register_read_raw(vcpu, i);
 
-       kvm_get_dr(vcpu, 6, &val);
-       smram->dr6 = (u32)val;
-       kvm_get_dr(vcpu, 7, &val);
-       smram->dr7 = (u32)val;
+       smram->dr6 = (u32)vcpu->arch.dr6;
+       smram->dr7 = (u32)vcpu->arch.dr7;
 
        enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
        enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);
@@ -231,7 +228,6 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
                                    struct kvm_smram_state_64 *smram)
 {
        struct desc_ptr dt;
-       unsigned long val;
        int i;
 
        for (i = 0; i < 16; i++)
@@ -240,11 +236,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
        smram->rip = kvm_rip_read(vcpu);
        smram->rflags = kvm_get_rflags(vcpu);
 
-
-       kvm_get_dr(vcpu, 6, &val);
-       smram->dr6 = val;
-       kvm_get_dr(vcpu, 7, &val);
-       smram->dr7 = val;
+       smram->dr6 = vcpu->arch.dr6;
+       smram->dr7 = vcpu->arch.dr7;
 
        smram->cr0 = kvm_read_cr0(vcpu);
        smram->cr3 = kvm_read_cr3(vcpu);
