Commit 5279d6f
KVM: SVM: Use guard(mutex) to simplify SNP vCPU state updates
Use guard(mutex) in sev_snp_init_protected_guest_state() and pull in its
lock-protected inner helper. Without an unlock trampoline (and even with
one), there is no real need for an inner helper. Eliminating the helper
also avoids having to fixup the open coded "lockdep" WARN_ON().

Opportunistically drop the error message if KVM can't obtain the pfn for
the new target VMSA. The error message provides zero information that
can't be gleaned from the fact that the vCPU is stuck.

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20250227012541.3234589-10-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent: e268bee

1 file changed: +50 -66 lines
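The guard(mutex) conversion is what lets both the unlock trampoline and the inner helper disappear: the lock is taken at the point of declaration and released automatically when the guard goes out of scope, including on every early return. Below is a minimal, illustrative sketch of that pattern, not code from the patch; example_lock, example_pending, and example_update() are made-up names, and guard(mutex) is assumed to come from the kernel's scope-based cleanup infrastructure (linux/cleanup.h via linux/mutex.h).

#include <linux/mutex.h>
#include <linux/cleanup.h>

/* Hypothetical lock and state, purely for illustration. */
static DEFINE_MUTEX(example_lock);
static bool example_pending;

static void example_update(void)
{
	/* Acquires example_lock now; releases it automatically at scope exit. */
	guard(mutex)(&example_lock);

	if (!example_pending)
		return;		/* early return still drops the lock */

	example_pending = false;

	/* ... perform the state update while holding the lock ... */
}

Without the guard, each early-return path above would need an explicit goto to a label that calls mutex_unlock(), which is exactly the trampoline the commit message refers to.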
arch/x86/kvm/svm/sev.c (50 additions, 66 deletions)

@@ -3842,11 +3842,26 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
 		BUG();
 	}
 
-static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
+/*
+ * Invoked as part of svm_vcpu_reset() processing of an init event.
+ */
+void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_memory_slot *slot;
+	struct page *page;
+	kvm_pfn_t pfn;
+	gfn_t gfn;
+
+	if (!sev_snp_guest(vcpu->kvm))
+		return;
 
-	WARN_ON(!mutex_is_locked(&svm->sev_es.snp_vmsa_mutex));
+	guard(mutex)(&svm->sev_es.snp_vmsa_mutex);
+
+	if (!svm->sev_es.snp_ap_waiting_for_reset)
+		return;
+
+	svm->sev_es.snp_ap_waiting_for_reset = false;
 
 	/* Mark the vCPU as offline and not runnable */
 	vcpu->arch.pv.pv_unhalted = false;
@@ -3861,79 +3876,48 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 	 */
 	vmcb_mark_all_dirty(svm->vmcb);
 
-	if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
-		gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
-		struct kvm_memory_slot *slot;
-		struct page *page;
-		kvm_pfn_t pfn;
-
-		slot = gfn_to_memslot(vcpu->kvm, gfn);
-		if (!slot)
-			return -EINVAL;
-
-		/*
-		 * The new VMSA will be private memory guest memory, so
-		 * retrieve the PFN from the gmem backend.
-		 */
-		if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
-			return -EINVAL;
-
-		/*
-		 * From this point forward, the VMSA will always be a
-		 * guest-mapped page rather than the initial one allocated
-		 * by KVM in svm->sev_es.vmsa. In theory, svm->sev_es.vmsa
-		 * could be free'd and cleaned up here, but that involves
-		 * cleanups like wbinvd_on_all_cpus() which would ideally
-		 * be handled during teardown rather than guest boot.
-		 * Deferring that also allows the existing logic for SEV-ES
-		 * VMSAs to be re-used with minimal SNP-specific changes.
-		 */
-		svm->sev_es.snp_has_guest_vmsa = true;
-
-		/* Use the new VMSA */
-		svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
-
-		/* Mark the vCPU as runnable */
-		vcpu->arch.pv.pv_unhalted = false;
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
-		svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
-
-		/*
-		 * gmem pages aren't currently migratable, but if this ever
-		 * changes then care should be taken to ensure
-		 * svm->sev_es.vmsa is pinned through some other means.
-		 */
-		kvm_release_page_clean(page);
-	}
+	if (!VALID_PAGE(svm->sev_es.snp_vmsa_gpa))
+		return;
 
-	return 0;
-}
+	gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
 
-/*
- * Invoked as part of svm_vcpu_reset() processing of an init event.
- */
-void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	int ret;
+	slot = gfn_to_memslot(vcpu->kvm, gfn);
+	if (!slot)
+		return;
 
-	if (!sev_snp_guest(vcpu->kvm))
+	/*
+	 * The new VMSA will be private memory guest memory, so retrieve the
+	 * PFN from the gmem backend.
+	 */
+	if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
 		return;
 
-	mutex_lock(&svm->sev_es.snp_vmsa_mutex);
+	/*
+	 * From this point forward, the VMSA will always be a guest-mapped page
+	 * rather than the initial one allocated by KVM in svm->sev_es.vmsa. In
+	 * theory, svm->sev_es.vmsa could be free'd and cleaned up here, but
+	 * that involves cleanups like wbinvd_on_all_cpus() which would ideally
+	 * be handled during teardown rather than guest boot. Deferring that
+	 * also allows the existing logic for SEV-ES VMSAs to be re-used with
+	 * minimal SNP-specific changes.
+	 */
+	svm->sev_es.snp_has_guest_vmsa = true;
 
-	if (!svm->sev_es.snp_ap_waiting_for_reset)
-		goto unlock;
+	/* Use the new VMSA */
+	svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
 
-	svm->sev_es.snp_ap_waiting_for_reset = false;
+	/* Mark the vCPU as runnable */
+	vcpu->arch.pv.pv_unhalted = false;
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
-	ret = __sev_snp_update_protected_guest_state(vcpu);
-	if (ret)
-		vcpu_unimpl(vcpu, "snp: AP state update on init failed\n");
+	svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
 
-unlock:
-	mutex_unlock(&svm->sev_es.snp_vmsa_mutex);
+	/*
+	 * gmem pages aren't currently migratable, but if this ever changes
+	 * then care should be taken to ensure svm->sev_es.vmsa is pinned
+	 * through some other means.
+	 */
+	kvm_release_page_clean(page);
 }
 
 static int sev_snp_ap_creation(struct vcpu_svm *svm)