@@ -3842,11 +3842,26 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
 	BUG();
 }
 
-static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
+/*
+ * Invoked as part of svm_vcpu_reset() processing of an init event.
+ */
+void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_memory_slot *slot;
+	struct page *page;
+	kvm_pfn_t pfn;
+	gfn_t gfn;
+
+	if (!sev_snp_guest(vcpu->kvm))
+		return;
 
-	WARN_ON(!mutex_is_locked(&svm->sev_es.snp_vmsa_mutex));
+	guard(mutex)(&svm->sev_es.snp_vmsa_mutex);
+
+	if (!svm->sev_es.snp_ap_waiting_for_reset)
+		return;
+
+	svm->sev_es.snp_ap_waiting_for_reset = false;
 
 	/* Mark the vCPU as offline and not runnable */
 	vcpu->arch.pv.pv_unhalted = false;
@@ -3861,79 +3876,48 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
 	 */
 	vmcb_mark_all_dirty(svm->vmcb);
 
-	if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
-		gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
-		struct kvm_memory_slot *slot;
-		struct page *page;
-		kvm_pfn_t pfn;
-
-		slot = gfn_to_memslot(vcpu->kvm, gfn);
-		if (!slot)
-			return -EINVAL;
-
-		/*
-		 * The new VMSA will be private memory guest memory, so
-		 * retrieve the PFN from the gmem backend.
-		 */
-		if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
-			return -EINVAL;
-
-		/*
-		 * From this point forward, the VMSA will always be a
-		 * guest-mapped page rather than the initial one allocated
-		 * by KVM in svm->sev_es.vmsa. In theory, svm->sev_es.vmsa
-		 * could be free'd and cleaned up here, but that involves
-		 * cleanups like wbinvd_on_all_cpus() which would ideally
-		 * be handled during teardown rather than guest boot.
-		 * Deferring that also allows the existing logic for SEV-ES
-		 * VMSAs to be re-used with minimal SNP-specific changes.
-		 */
-		svm->sev_es.snp_has_guest_vmsa = true;
-
-		/* Use the new VMSA */
-		svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
-
-		/* Mark the vCPU as runnable */
-		vcpu->arch.pv.pv_unhalted = false;
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
-		svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
-
-		/*
-		 * gmem pages aren't currently migratable, but if this ever
-		 * changes then care should be taken to ensure
-		 * svm->sev_es.vmsa is pinned through some other means.
-		 */
-		kvm_release_page_clean(page);
-	}
+	if (!VALID_PAGE(svm->sev_es.snp_vmsa_gpa))
+		return;
 
-	return 0;
-}
+	gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
 
-/*
- * Invoked as part of svm_vcpu_reset() processing of an init event.
- */
-void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	int ret;
+	slot = gfn_to_memslot(vcpu->kvm, gfn);
+	if (!slot)
+		return;
 
-	if (!sev_snp_guest(vcpu->kvm))
+	/*
+	 * The new VMSA will be private memory guest memory, so retrieve the
+	 * PFN from the gmem backend.
+	 */
+	if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
 		return;
 
-	mutex_lock(&svm->sev_es.snp_vmsa_mutex);
+	/*
+	 * From this point forward, the VMSA will always be a guest-mapped page
+	 * rather than the initial one allocated by KVM in svm->sev_es.vmsa. In
+	 * theory, svm->sev_es.vmsa could be free'd and cleaned up here, but
+	 * that involves cleanups like wbinvd_on_all_cpus() which would ideally
+	 * be handled during teardown rather than guest boot. Deferring that
+	 * also allows the existing logic for SEV-ES VMSAs to be re-used with
+	 * minimal SNP-specific changes.
+	 */
+	svm->sev_es.snp_has_guest_vmsa = true;
 
-	if (!svm->sev_es.snp_ap_waiting_for_reset)
-		goto unlock;
+	/* Use the new VMSA */
+	svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
 
-	svm->sev_es.snp_ap_waiting_for_reset = false;
+	/* Mark the vCPU as runnable */
+	vcpu->arch.pv.pv_unhalted = false;
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
-	ret = __sev_snp_update_protected_guest_state(vcpu);
-	if (ret)
-		vcpu_unimpl(vcpu, "snp: AP state update on init failed\n");
+	svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
 
-unlock:
-	mutex_unlock(&svm->sev_es.snp_vmsa_mutex);
+	/*
+	 * gmem pages aren't currently migratable, but if this ever changes
+	 * then care should be taken to ensure svm->sev_es.vmsa is pinned
+	 * through some other means.
+	 */
+	kvm_release_page_clean(page);
 }
 
 static int sev_snp_ap_creation(struct vcpu_svm *svm)
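Note on the locking change above: the patch drops the mutex_lock()/goto unlock/mutex_unlock() pattern in favor of guard(mutex), the kernel's scope-based cleanup helper, so the new early returns (no memslot, kvm_gmem_get_pfn() failure) release snp_vmsa_mutex automatically. Below is a minimal, self-contained userspace sketch of that idiom, not the kernel implementation: it uses pthreads and the GCC/Clang cleanup attribute, and scoped_guard_mutex(), mutex_locker(), mutex_unlocker() and handle_init_event() are hypothetical names used only for illustration.

#include <pthread.h>
#include <stdio.h>

/* Cleanup callback: invoked with a pointer to the guard variable. */
static void mutex_unlocker(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Lock the mutex and hand it back so the guard variable owns the unlock. */
static pthread_mutex_t *mutex_locker(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	return m;
}

/*
 * Hypothetical stand-in for guard(mutex)(...): the mutex is locked when the
 * guard variable is initialized and unlocked when it leaves scope, on every
 * return path.
 */
#define scoped_guard_mutex(lock)					\
	pthread_mutex_t *scope_guard					\
	__attribute__((cleanup(mutex_unlocker), unused)) =		\
	mutex_locker(lock)

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int ap_waiting_for_reset = 1;	/* toy analog of snp_ap_waiting_for_reset */

static void handle_init_event(void)
{
	scoped_guard_mutex(&state_lock);

	if (!ap_waiting_for_reset)
		return;			/* lock released automatically here */

	ap_waiting_for_reset = 0;
	printf("AP reset state consumed\n");
}					/* ...and here on the normal path */

int main(void)
{
	handle_init_event();	/* prints once */
	handle_init_event();	/* early-returns, still unlocks correctly */
	return 0;
}

The payoff is the same as in the patch: adding another failure check never requires touching an unlock label, which is what makes the flattened early-return structure of sev_snp_init_protected_guest_state() safe.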