Commit 0ec3d6d

KVM: x86: Fully defer to vendor code to decide how to force immediate exit
Now that vmx->req_immediate_exit is used only in the scope of
vmx_vcpu_run(), use force_immediate_exit to detect that KVM should usurp
the VMX preemption timer to force a VM-Exit, and let vendor code fully
handle forcing a VM-Exit.

Opportunistically drop __kvm_request_immediate_exit() and just have
vendor code call smp_send_reschedule() directly. SVM already does this
when injecting an event while also trying to single-step an IRET, i.e.
it's not exactly secret knowledge that KVM uses a reschedule IPI to
force an exit.

Link: https://lore.kernel.org/r/20240110012705.506918-7-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 7b3d1bb commit 0ec3d6d
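
In short: common x86 code now only decides *that* an immediate exit is needed, and the vendor vcpu_run implementations decide *how*. A condensed sketch of the resulting flow, assembled from the hunks below (surrounding code elided, not a verbatim copy of either file):

        /* VMX: prefer the preemption timer; without it, fall back to an IPI. */
        if (enable_preemption_timer)
                vmx_update_hv_timer(vcpu, force_immediate_exit); /* value 0 => immediate exit */
        else if (force_immediate_exit)
                smp_send_reschedule(vcpu->cpu);

        /* SVM: no preemption timer, so the reschedule IPI is the only option. */
        if (force_immediate_exit)
                smp_send_reschedule(vcpu->cpu);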

File tree: 6 files changed (+19, -36 lines)

arch/x86/include/asm/kvm-x86-ops.h

Lines changed: 0 additions & 1 deletion

@@ -103,7 +103,6 @@ KVM_X86_OP(write_tsc_multiplier)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
-KVM_X86_OP(request_immediate_exit)
 KVM_X86_OP(sched_in)
 KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
 KVM_X86_OP_OPTIONAL(vcpu_blocking)

arch/x86/include/asm/kvm_host.h

Lines changed: 0 additions & 3 deletions

@@ -1732,8 +1732,6 @@ struct kvm_x86_ops {
                               struct x86_exception *exception);
        void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);

-       void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
-
        void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);

        /*
@@ -2239,7 +2237,6 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

 void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
                                      u32 size);

arch/x86/kvm/svm/svm.c

Lines changed: 4 additions & 3 deletions

@@ -4140,9 +4140,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
                 * is enough to force an immediate vmexit.
                 */
                disable_nmi_singlestep(svm);
-               smp_send_reschedule(vcpu->cpu);
+               force_immediate_exit = true;
        }

+       if (force_immediate_exit)
+               smp_send_reschedule(vcpu->cpu);
+
        pre_svm_run(vcpu);

        sync_lapic_to_cr8(vcpu);
@@ -4995,8 +4998,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .check_intercept = svm_check_intercept,
        .handle_exit_irqoff = svm_handle_exit_irqoff,

-       .request_immediate_exit = __kvm_request_immediate_exit,
-
        .sched_in = svm_sched_in,

        .nested_ops = &svm_nested_ops,
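
Why the IPI works here: svm_vcpu_run() executes with host IRQs disabled, so the self-targeted reschedule IPI stays pending across VMRUN and triggers an intercepted physical-interrupt #VMEXIT as soon as the guest starts running. Routing the NMI single-step case through force_immediate_exit lets both users share the single smp_send_reschedule() call above.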

arch/x86/kvm/vmx/vmx.c

Lines changed: 14 additions & 18 deletions

@@ -49,6 +49,8 @@
 #include <asm/spec-ctrl.h>
 #include <asm/vmx.h>

+#include <trace/events/ipi.h>
+
 #include "capabilities.h"
 #include "cpuid.h"
 #include "hyperv.h"
@@ -1281,8 +1283,6 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
        u16 fs_sel, gs_sel;
        int i;

-       vmx->req_immediate_exit = false;
-
        /*
         * Note that guest MSRs to be saved/restored can also be changed
         * when guest state is loaded. This happens when guest transitions
@@ -5988,7 +5988,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
        return 1;
 }

-static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
+static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu,
+                                                  bool force_immediate_exit)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);

@@ -6004,7 +6005,7 @@ static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
         * If the timer expired because KVM used it to force an immediate exit,
         * then mission accomplished.
         */
-       if (vmx->req_immediate_exit)
+       if (force_immediate_exit)
                return EXIT_FASTPATH_EXIT_HANDLED;

        /*
@@ -7166,13 +7167,13 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host, false);
 }

-static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 tscl;
        u32 delta_tsc;

-       if (vmx->req_immediate_exit) {
+       if (force_immediate_exit) {
                vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
                vmx->loaded_vmcs->hv_timer_soft_disabled = false;
        } else if (vmx->hv_deadline_tsc != -1) {
@@ -7225,7 +7226,8 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
        barrier_nospec();
 }

-static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
+                                            bool force_immediate_exit)
 {
        /*
         * If L2 is active, some VMX preemption timer exits can be handled in
@@ -7239,7 +7241,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
        case EXIT_REASON_MSR_WRITE:
                return handle_fastpath_set_msr_irqoff(vcpu);
        case EXIT_REASON_PREEMPTION_TIMER:
-               return handle_fastpath_preemption_timer(vcpu);
+               return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
        default:
                return EXIT_FASTPATH_NONE;
        }
@@ -7382,7 +7384,9 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
        vmx_passthrough_lbr_msrs(vcpu);

        if (enable_preemption_timer)
-               vmx_update_hv_timer(vcpu);
+               vmx_update_hv_timer(vcpu, force_immediate_exit);
+       else if (force_immediate_exit)
+               smp_send_reschedule(vcpu->cpu);

        kvm_wait_lapic_expire(vcpu);

@@ -7446,7 +7450,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
        vmx_recover_nmi_blocking(vmx);
        vmx_complete_interrupts(vmx);

-       return vmx_exit_handlers_fastpath(vcpu);
+       return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit);
 }

 static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
@@ -7926,11 +7930,6 @@ static __init void vmx_set_cpu_caps(void)
                kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
 }

-static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
-{
-       to_vmx(vcpu)->req_immediate_exit = true;
-}
-
 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
                                  struct x86_instruction_info *info)
 {
@@ -8383,8 +8382,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .check_intercept = vmx_check_intercept,
        .handle_exit_irqoff = vmx_handle_exit_irqoff,

-       .request_immediate_exit = vmx_request_immediate_exit,
-
        .sched_in = vmx_sched_in,

        .cpu_dirty_log_size = PML_ENTITY_NUM,
@@ -8644,7 +8641,6 @@ static __init int hardware_setup(void)
        if (!enable_preemption_timer) {
                vmx_x86_ops.set_hv_timer = NULL;
                vmx_x86_ops.cancel_hv_timer = NULL;
-               vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
        }

        kvm_caps.supported_mce_cap |= MCG_LMCE_P;
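
On the VMX side, the force_immediate_exit parameter replaces vmx->req_immediate_exit one for one: vmx_update_hv_timer() arms the preemption timer with a value of 0 so the vCPU exits immediately after VM-Enter, and handle_fastpath_preemption_timer() then treats that exit as fully handled (EXIT_FASTPATH_EXIT_HANDLED). With the preemption timer disabled, the reschedule-IPI fallback moves from the now-deleted hook into vmx_vcpu_run() itself; the new <trace/events/ipi.h> include is presumably needed because smp_send_reschedule() is an inline wrapper that fires the ipi_send_cpu tracepoint in its caller.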

arch/x86/kvm/vmx/vmx.h

Lines changed: 0 additions & 2 deletions

@@ -332,8 +332,6 @@ struct vcpu_vmx {
        unsigned int ple_window;
        bool ple_window_dirty;

-       bool req_immediate_exit;
-
        /* Support for PML */
 #define PML_ENTITY_NUM 512
        struct page *pml_pg;

arch/x86/kvm/x86.c

Lines changed: 1 addition & 9 deletions

@@ -10667,12 +10667,6 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
        static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
 }

-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
-{
-       smp_send_reschedule(vcpu->cpu);
-}
-EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
-
 /*
  * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
@@ -10922,10 +10916,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }

-       if (req_immediate_exit) {
+       if (req_immediate_exit)
                kvm_make_request(KVM_REQ_EVENT, vcpu);
-               static_call(kvm_x86_request_immediate_exit)(vcpu);
-       }

        fpregs_assert_state_consistent();
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
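
With the static_call gone, vcpu_enter_guest() keeps only the KVM_REQ_EVENT request (so pending events are re-evaluated after the forced exit) and hands the decision to vendor code via the vcpu_run hook, whose force_immediate_exit parameter appears in the svm.c and vmx.c hunks above. A minimal sketch of that call site, assuming the plumbing added earlier in this series (the exact x86.c line is not part of this diff):

        /* Sketch only; req_immediate_exit becomes the hook's force_immediate_exit. */
        exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, req_immediate_exit);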
