Skip to content

Commit b2d2af7

Browse files
mrutland-arm authored and bonzini committed
kvm/x86: rework guest entry logic
For consistency and clarity, migrate x86 over to the generic helpers for guest timing and lockdep/RCU/tracing management, and remove the x86-specific helpers. Prior to this patch, the guest timing was entered in kvm_guest_enter_irqoff() (called by svm_vcpu_enter_exit() and vmx_vcpu_enter_exit()), and was exited by the call to vtime_account_guest_exit() within vcpu_enter_guest(). To minimize duplication and to more clearly balance entry and exit, both entry and exit of guest timing are placed in vcpu_enter_guest(), using the new guest_timing_{enter,exit}_irqoff() helpers. When context tracking is used a small amount of additional time will be accounted towards guests; tick-based accounting is unaffected as IRQs are disabled at this point and not enabled until after the return from the guest. This also corrects (benign) mis-balanced context tracking accounting introduced in commits: ae95f56 ("KVM: X86: TSCDEADLINE MSR emulation fastpath") 26efe2f ("KVM: VMX: Handle preemption timer fastpath") Where KVM can enter a guest multiple times, calling vtime_guest_enter() without a corresponding call to vtime_account_guest_exit(), and with vtime_account_system() called when vtime_account_guest() should be used. As account_system_time() checks PF_VCPU and calls account_guest_time(), this doesn't result in any functional problem, but is unnecessarily confusing. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Paolo Bonzini <pbonzini@redhat.com> Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jim Mattson <jmattson@google.com> Cc: Joerg Roedel <joro@8bytes.org> Cc: Sean Christopherson <seanjc@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vitaly Kuznetsov <vkuznets@redhat.com> Cc: Wanpeng Li <wanpengli@tencent.com> Message-Id: <20220201132926.3301912-4-mark.rutland@arm.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 72e3244 commit b2d2af7

File tree

4 files changed

+7
-50
lines changed

4 files changed

+7
-50
lines changed

arch/x86/kvm/svm/svm.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3630,7 +3630,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
36303630
struct vcpu_svm *svm = to_svm(vcpu);
36313631
unsigned long vmcb_pa = svm->current_vmcb->pa;
36323632

3633-
kvm_guest_enter_irqoff();
3633+
guest_state_enter_irqoff();
36343634

36353635
if (sev_es_guest(vcpu->kvm)) {
36363636
__svm_sev_es_vcpu_run(vmcb_pa);
@@ -3650,7 +3650,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
36503650
vmload(__sme_page_pa(sd->save_area));
36513651
}
36523652

3653-
kvm_guest_exit_irqoff();
3653+
guest_state_exit_irqoff();
36543654
}
36553655

36563656
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)

arch/x86/kvm/vmx/vmx.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6767,7 +6767,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
67676767
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
67686768
struct vcpu_vmx *vmx)
67696769
{
6770-
kvm_guest_enter_irqoff();
6770+
guest_state_enter_irqoff();
67716771

67726772
/* L1D Flush includes CPU buffer clear to mitigate MDS */
67736773
if (static_branch_unlikely(&vmx_l1d_should_flush))
@@ -6783,7 +6783,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
67836783

67846784
vcpu->arch.cr2 = native_read_cr2();
67856785

6786-
kvm_guest_exit_irqoff();
6786+
guest_state_exit_irqoff();
67876787
}
67886788

67896789
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)

arch/x86/kvm/x86.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10088,6 +10088,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1008810088
set_debugreg(0, 7);
1008910089
}
1009010090

10091+
guest_timing_enter_irqoff();
10092+
1009110093
for (;;) {
1009210094
/*
1009310095
* Assert that vCPU vs. VM APICv state is consistent. An APICv
@@ -10172,7 +10174,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1017210174
* of accounting via context tracking, but the loss of accuracy is
1017310175
* acceptable for all known use cases.
1017410176
*/
10175-
vtime_account_guest_exit();
10177+
guest_timing_exit_irqoff();
1017610178

1017710179
if (lapic_in_kernel(vcpu)) {
1017810180
s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;

arch/x86/kvm/x86.h

Lines changed: 0 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -10,51 +10,6 @@
1010

1111
void kvm_spurious_fault(void);
1212

13-
static __always_inline void kvm_guest_enter_irqoff(void)
14-
{
15-
/*
16-
* VMENTER enables interrupts (host state), but the kernel state is
17-
* interrupts disabled when this is invoked. Also tell RCU about
18-
* it. This is the same logic as for exit_to_user_mode().
19-
*
20-
* This ensures that e.g. latency analysis on the host observes
21-
* guest mode as interrupt enabled.
22-
*
23-
* guest_enter_irqoff() informs context tracking about the
24-
* transition to guest mode and if enabled adjusts RCU state
25-
* accordingly.
26-
*/
27-
instrumentation_begin();
28-
trace_hardirqs_on_prepare();
29-
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
30-
instrumentation_end();
31-
32-
guest_enter_irqoff();
33-
lockdep_hardirqs_on(CALLER_ADDR0);
34-
}
35-
36-
static __always_inline void kvm_guest_exit_irqoff(void)
37-
{
38-
/*
39-
* VMEXIT disables interrupts (host state), but tracing and lockdep
40-
* have them in state 'on' as recorded before entering guest mode.
41-
* Same as enter_from_user_mode().
42-
*
43-
* context_tracking_guest_exit() restores host context and reinstates
44-
* RCU if enabled and required.
45-
*
46-
* This needs to be done immediately after VM-Exit, before any code
47-
* that might contain tracepoints or call out to the greater world,
48-
* e.g. before x86_spec_ctrl_restore_host().
49-
*/
50-
lockdep_hardirqs_off(CALLER_ADDR0);
51-
context_tracking_guest_exit();
52-
53-
instrumentation_begin();
54-
trace_hardirqs_off_finish();
55-
instrumentation_end();
56-
}
57-
5813
#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
5914
({ \
6015
bool failed = (consistency_check); \

0 commit comments

Comments
 (0)