Skip to content

Commit b50cb2b

Browse files
committed
KVM: x86: Use a dedicated flow for queueing re-injected exceptions
Open code the filling of vcpu->arch.exception in kvm_requeue_exception() instead of bouncing through kvm_multiple_exception(), as re-injection doesn't actually share that much code with "normal" injection, e.g. the VM-Exit interception check, payload delivery, and nested exception code is all bypassed as those flows only apply during initial injection. When FRED comes along, the special casing will only get worse, as FRED explicitly tracks nested exceptions and essentially delivers the payload on the stack frame, i.e. re-injection will need more inputs, and normal injection will have yet more code that needs to be bypassed when KVM is re-injecting an exception. No functional change intended. Signed-off-by: Xin Li (Intel) <xin@zytor.com> Tested-by: Shan Kang <shan.kang@intel.com> Link: https://lore.kernel.org/r/20241001050110.3643764-2-xin@zytor.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 4fa0efb commit b50cb2b

File tree

4 files changed

+63
-61
lines changed

4 files changed

+63
-61
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2161,8 +2161,8 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
21612161
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
21622162
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
21632163
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
2164-
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
2165-
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
2164+
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
2165+
bool has_error_code, u32 error_code);
21662166
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
21672167
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
21682168
struct x86_exception *fault);

arch/x86/kvm/svm/svm.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4116,20 +4116,23 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
41164116
vcpu->arch.nmi_injected = true;
41174117
svm->nmi_l1_to_l2 = nmi_l1_to_l2;
41184118
break;
4119-
case SVM_EXITINTINFO_TYPE_EXEPT:
4119+
case SVM_EXITINTINFO_TYPE_EXEPT: {
4120+
u32 error_code = 0;
4121+
41204122
/*
41214123
* Never re-inject a #VC exception.
41224124
*/
41234125
if (vector == X86_TRAP_VC)
41244126
break;
41254127

4126-
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
4127-
u32 err = svm->vmcb->control.exit_int_info_err;
4128-
kvm_requeue_exception_e(vcpu, vector, err);
4128+
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR)
4129+
error_code = svm->vmcb->control.exit_int_info_err;
41294130

4130-
} else
4131-
kvm_requeue_exception(vcpu, vector);
4131+
kvm_requeue_exception(vcpu, vector,
4132+
exitintinfo & SVM_EXITINTINFO_VALID_ERR,
4133+
error_code);
41324134
break;
4135+
}
41334136
case SVM_EXITINTINFO_TYPE_INTR:
41344137
kvm_queue_interrupt(vcpu, vector, false);
41354138
break;

arch/x86/kvm/vmx/vmx.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7155,13 +7155,17 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
71557155
case INTR_TYPE_SOFT_EXCEPTION:
71567156
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
71577157
fallthrough;
7158-
case INTR_TYPE_HARD_EXCEPTION:
7159-
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
7160-
u32 err = vmcs_read32(error_code_field);
7161-
kvm_requeue_exception_e(vcpu, vector, err);
7162-
} else
7163-
kvm_requeue_exception(vcpu, vector);
7158+
case INTR_TYPE_HARD_EXCEPTION: {
7159+
u32 error_code = 0;
7160+
7161+
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK)
7162+
error_code = vmcs_read32(error_code_field);
7163+
7164+
kvm_requeue_exception(vcpu, vector,
7165+
idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK,
7166+
error_code);
71647167
break;
7168+
}
71657169
case INTR_TYPE_SOFT_INTR:
71667170
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
71677171
fallthrough;

arch/x86/kvm/x86.c

Lines changed: 42 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -800,23 +800,20 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
800800
ex->payload = payload;
801801
}
802802

803-
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
804-
unsigned nr, bool has_error, u32 error_code,
805-
bool has_payload, unsigned long payload, bool reinject)
803+
static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned int nr,
804+
bool has_error, u32 error_code,
805+
bool has_payload, unsigned long payload)
806806
{
807807
u32 prev_nr;
808808
int class1, class2;
809809

810810
kvm_make_request(KVM_REQ_EVENT, vcpu);
811811

812812
/*
813-
* If the exception is destined for L2 and isn't being reinjected,
814-
* morph it to a VM-Exit if L1 wants to intercept the exception. A
815-
* previously injected exception is not checked because it was checked
816-
* when it was original queued, and re-checking is incorrect if _L1_
817-
* injected the exception, in which case it's exempt from interception.
813+
* If the exception is destined for L2, morph it to a VM-Exit if L1
814+
* wants to intercept the exception.
818815
*/
819-
if (!reinject && is_guest_mode(vcpu) &&
816+
if (is_guest_mode(vcpu) &&
820817
kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
821818
kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
822819
has_payload, payload);
@@ -825,28 +822,9 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
825822

826823
if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
827824
queue:
828-
if (reinject) {
829-
/*
830-
* On VM-Entry, an exception can be pending if and only
831-
* if event injection was blocked by nested_run_pending.
832-
* In that case, however, vcpu_enter_guest() requests an
833-
* immediate exit, and the guest shouldn't proceed far
834-
* enough to need reinjection.
835-
*/
836-
WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
837-
vcpu->arch.exception.injected = true;
838-
if (WARN_ON_ONCE(has_payload)) {
839-
/*
840-
* A reinjected event has already
841-
* delivered its payload.
842-
*/
843-
has_payload = false;
844-
payload = 0;
845-
}
846-
} else {
847-
vcpu->arch.exception.pending = true;
848-
vcpu->arch.exception.injected = false;
849-
}
825+
vcpu->arch.exception.pending = true;
826+
vcpu->arch.exception.injected = false;
827+
850828
vcpu->arch.exception.has_error_code = has_error;
851829
vcpu->arch.exception.vector = nr;
852830
vcpu->arch.exception.error_code = error_code;
@@ -887,29 +865,52 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
887865

888866
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
889867
{
890-
kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
868+
kvm_multiple_exception(vcpu, nr, false, 0, false, 0);
891869
}
892870
EXPORT_SYMBOL_GPL(kvm_queue_exception);
893871

894-
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
895-
{
896-
kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
897-
}
898-
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
899872

900873
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
901874
unsigned long payload)
902875
{
903-
kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
876+
kvm_multiple_exception(vcpu, nr, false, 0, true, payload);
904877
}
905878
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
906879

907880
static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
908881
u32 error_code, unsigned long payload)
909882
{
910-
kvm_multiple_exception(vcpu, nr, true, error_code,
911-
true, payload, false);
883+
kvm_multiple_exception(vcpu, nr, true, error_code, true, payload);
884+
}
885+
886+
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
887+
bool has_error_code, u32 error_code)
888+
{
889+
890+
/*
891+
* On VM-Entry, an exception can be pending if and only if event
892+
* injection was blocked by nested_run_pending. In that case, however,
893+
* vcpu_enter_guest() requests an immediate exit, and the guest
894+
* shouldn't proceed far enough to need reinjection.
895+
*/
896+
WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
897+
898+
/*
899+
* Do not check for interception when injecting an event for L2, as the
900+
* exception was checked for intercept when it was originally queued, and
901+
* re-checking is incorrect if _L1_ injected the exception, in which
902+
* case it's exempt from interception.
903+
*/
904+
kvm_make_request(KVM_REQ_EVENT, vcpu);
905+
906+
vcpu->arch.exception.injected = true;
907+
vcpu->arch.exception.has_error_code = has_error_code;
908+
vcpu->arch.exception.vector = nr;
909+
vcpu->arch.exception.error_code = error_code;
910+
vcpu->arch.exception.has_payload = false;
911+
vcpu->arch.exception.payload = 0;
912912
}
913+
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
913914

914915
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
915916
{
@@ -980,16 +981,10 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu)
980981

981982
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
982983
{
983-
kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
984+
kvm_multiple_exception(vcpu, nr, true, error_code, false, 0);
984985
}
985986
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
986987

987-
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
988-
{
989-
kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
990-
}
991-
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
992-
993988
/*
994989
* Checks if cpl <= required_cpl; if true, return true. Otherwise queue
995990
* a #GP and return false.

0 commit comments

Comments
 (0)