
Commit c92be2f
KVM: SVM: Save/restore non-volatile GPRs in SEV-ES VMRUN via host save area
Use the host save area to save/restore non-volatile (callee-saved)
registers in __svm_sev_es_vcpu_run() to take advantage of hardware loading
all registers from the save area on #VMEXIT. KVM still needs to save the
registers it wants restored, but the loads are handled automatically by
hardware. Aside from less assembly code, letting hardware do the
restoration means stack frames are preserved for the entirety of
__svm_sev_es_vcpu_run().

Opportunistically add a comment to call out why @svm needs to be saved
across VMRUN->#VMEXIT, as it's not easy to decipher that from the macro
hell.

Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Michael Roth <michael.roth@amd.com>
Cc: Alexey Kardashevskiy <aik@amd.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20240223204233.3337324-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
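To make the division of labor concrete, below is a minimal userspace sketch (hypothetical struct and function names, not kernel code) of the contract described above: software stores the callee-saved GPRs into the host save area before VMRUN, and on #VMEXIT hardware reloads every GPR except RAX and RSP from that area.

/* Minimal sketch, not kernel code: hypothetical types modeling the
 * VMRUN/#VMEXIT contract for the callee-saved GPRs. */
#include <stdint.h>
#include <stdio.h>

struct gprs {
	uint64_t rbx, rbp, r12, r13, r14, r15;
};

/* Software's half: KVM stores the GPRs it wants back into the host save
 * area before VMRUN (the six 'mov' instructions added in vmenter.S). */
static void kvm_save_host_gprs(struct gprs *hostsa, const struct gprs *live)
{
	*hostsa = *live;
}

/* Hardware's half: on #VMEXIT the CPU reloads host GPRs (all but RAX and
 * RSP) from the host save area; it does not write them there on VMRUN. */
static void hw_reload_on_vmexit(struct gprs *live, const struct gprs *hostsa)
{
	*live = *hostsa;
}

int main(void)
{
	struct gprs live = { .rbx = 1, .rbp = 2, .r12 = 3 };
	struct gprs hostsa = { 0 };

	kvm_save_host_gprs(&hostsa, &live);  /* before VMRUN */
	live = (struct gprs){ 0 };           /* guest run clobbers state */
	hw_reload_on_vmexit(&live, &hostsa); /* on #VMEXIT */

	printf("rbx=%llu rbp=%llu r12=%llu\n",
	       (unsigned long long)live.rbx,
	       (unsigned long long)live.rbp,
	       (unsigned long long)live.r12);
	return 0;
}

Because hardware, not a 'pop' sequence, restores RBP, the function's stack frame stays intact from entry to return, which is the unwinding benefit the message calls out.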
1 parent 87e8e36 commit c92be2f

File tree: 3 files changed, +35/-26 lines

arch/x86/kvm/svm/svm.c (10 additions, 7 deletions)

@@ -1503,6 +1503,11 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
 
+static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
+{
+	return page_address(sd->save_area) + 0x400;
+}
+
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -1519,12 +1524,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	 * or subsequent vmload of host save area.
 	 */
 	vmsave(sd->save_area_pa);
-	if (sev_es_guest(vcpu->kvm)) {
-		struct sev_es_save_area *hostsa;
-		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-
-		sev_es_prepare_switch_to_guest(hostsa);
-	}
+	if (sev_es_guest(vcpu->kvm))
+		sev_es_prepare_switch_to_guest(sev_es_host_save_area(sd));
 
 	if (tsc_scaling)
 		__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
@@ -4101,14 +4102,16 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
 {
+	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	guest_state_enter_irqoff();
 
 	amd_clear_divider();
 
 	if (sev_es_guest(vcpu->kvm))
-		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
+				      sev_es_host_save_area(sd));
 	else
 		__svm_vcpu_run(svm, spec_ctrl_intercepted);
 
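The new sev_es_host_save_area() helper centralizes pointer math that was previously open-coded in svm_prepare_switch_to_guest(): the per-CPU save area page is the VMSAVE target at offset 0 and, for SEV-ES, holds the host save state (struct sev_es_save_area) at offset 0x400. A standalone sketch of that arithmetic, with hypothetical userspace stand-ins for the kernel helpers:

/* Hypothetical userspace model of the save-area layout; PAGE_SIZE and the
 * 0x400 offset mirror the kernel code above, everything else is a stand-in. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096
#define HOSTSA_OFFSET	0x400	/* SEV-ES host save state within the page */

static void *sev_es_host_save_area(void *save_area_page)
{
	return (uint8_t *)save_area_page + HOSTSA_OFFSET;
}

int main(void)
{
	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!page)
		return 1;
	printf("hostsa = page + %#lx\n",
	       (unsigned long)((uint8_t *)sev_es_host_save_area(page) -
			       (uint8_t *)page));
	free(page);
	return 0;
}

svm_vcpu_enter_exit() now resolves this pointer and passes it as the third argument to __svm_sev_es_vcpu_run(), so under the x86-64 SysV calling convention it arrives in RDX, which is why the assembly below addresses the save area via (%rdx).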

arch/x86/kvm/svm/svm.h (2 additions, 1 deletion)

@@ -698,7 +698,8 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
 
 /* vmenter.S */
 
-void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
+			   struct sev_es_save_area *hostsa);
 void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 
 #define DEFINE_KVM_GHCB_ACCESSORS(field) \

arch/x86/kvm/svm/vmenter.S (23 additions, 18 deletions)

@@ -292,31 +292,43 @@ SYM_FUNC_START(__svm_vcpu_run)
 SYM_FUNC_END(__svm_vcpu_run)
 
 #ifdef CONFIG_KVM_AMD_SEV
+
+
+#ifdef CONFIG_X86_64
+#define SEV_ES_GPRS_BASE 0x300
+#define SEV_ES_RBX	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
+#define SEV_ES_RBP	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_R12	(SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
+#define SEV_ES_R13	(SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
+#define SEV_ES_R14	(SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
+#define SEV_ES_R15	(SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
+#endif
+
 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
  * @svm:	struct vcpu_svm *
  * @spec_ctrl_intercepted:	bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
-	push %rbp
-	push %r15
-	push %r14
-	push %r13
-	push %r12
-	push %rbx
-
 	/*
-	 * Save variables needed after vmexit on the stack, in inverse
-	 * order compared to when they are needed.
+	 * Save non-volatile (callee-saved) registers to the host save area.
+	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
+	 * saved on VMRUN.
 	 */
+	mov %rbp, SEV_ES_RBP (%rdx)
+	mov %r15, SEV_ES_R15 (%rdx)
+	mov %r14, SEV_ES_R14 (%rdx)
+	mov %r13, SEV_ES_R13 (%rdx)
+	mov %r12, SEV_ES_R12 (%rdx)
+	mov %rbx, SEV_ES_RBX (%rdx)
 
 	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
 	push %rsi
 
 	/* Save @svm. */
 	push %rdi
 
-	/* Clobbers RAX, RCX, RDX. */
+	/* Clobbers RAX, RCX, RDX (@hostsa). */
 	RESTORE_GUEST_SPEC_CTRL
 
 	/* Get svm->current_vmcb->pa into RAX. */
@@ -338,7 +350,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
 
-	/* Clobbers RAX, RCX, RDX. */
+	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm). */
 	RESTORE_HOST_SPEC_CTRL
 
 	/*
@@ -353,13 +365,6 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	/* "Pop" and discard @spec_ctrl_intercepted. */
 	pop %rax
 
-	pop %rbx
-
-	pop %r12
-	pop %r13
-	pop %r14
-	pop %r15
-	pop %rbp
 	RET
 
 	RESTORE_GUEST_SPEC_CTRL_BODY
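The SEV_ES_* offsets are derived rather than hard-coded: the GPR block of the SEV-ES save state begins at SEV_ES_GPRS_BASE (0x300) and is indexed by KVM's __VCPU_REGS_* register numbering scaled by WORD_SIZE. As a sanity check, here is a small standalone calculator with those values inlined as assumptions (__VCPU_REGS_* follows the x86 register encoding, WORD_SIZE is 8 bytes on x86-64):

/* Standalone sketch: the __VCPU_REGS_* values (x86 register numbering) and
 * WORD_SIZE are inlined here as assumptions from KVM's asm-offsets. */
#include <stdio.h>

#define WORD_SIZE		8
#define SEV_ES_GPRS_BASE	0x300

enum { REGS_RBX = 3, REGS_RBP = 5, REGS_R12 = 12, REGS_R13 = 13,
       REGS_R14 = 14, REGS_R15 = 15 };

int main(void)
{
	static const struct { const char *name; int reg; } gprs[] = {
		{ "RBX", REGS_RBX }, { "RBP", REGS_RBP },
		{ "R12", REGS_R12 }, { "R13", REGS_R13 },
		{ "R14", REGS_R14 }, { "R15", REGS_R15 },
	};

	/* Prints 0x318, 0x328, 0x360, 0x368, 0x370 and 0x378. */
	for (size_t i = 0; i < sizeof(gprs) / sizeof(gprs[0]); i++)
		printf("SEV_ES_%s = %#x\n", gprs[i].name,
		       (unsigned)(SEV_ES_GPRS_BASE + gprs[i].reg * WORD_SIZE));
	return 0;
}

Note that @spec_ctrl_intercepted (RSI) and @svm (RDI) are still saved on the stack rather than in the save area: both are needed after #VMEXIT by RESTORE_HOST_SPEC_CTRL, as the updated clobber comments spell out.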
