Commit 44ecfa3

Merge branch 'svm' of https://github.com/kvm-x86/linux into HEAD
Clean up SVM's enter/exit assembly code so that it can be compiled without
OBJECT_FILES_NON_STANDARD.  The "standard" __svm_vcpu_run() can't be made
100% bulletproof, as RBP isn't restored on #VMEXIT, but that's also the case
for __vmx_vcpu_run(), and getting "close enough" is better than not even
trying.

As for SEV-ES, after yet another refresher on swap types, I realized KVM can
simply let the hardware restore registers after #VMEXIT; all that's missing
is storing the current values to the host save area (they are swap type B).
This should provide 100% accuracy when using stack frames for unwinding, and
requires less assembly.

In between, build the SEV-ES code iff CONFIG_KVM_AMD_SEV=y, and yank out
"support" for 32-bit kernels in __svm_sev_es_vcpu_run, which was
unnecessarily polluting the code for a configuration that is disabled at
build time.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2 parents: 1c3bed8 + 27ca867
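
As a rough illustration of the "swap type B" flow described in the commit
message, here is a minimal user-space sketch. It is purely hypothetical: the
struct, the function names, and the save-area variable below are made up for
illustration and are not KVM code or APIs.

/*
 * Toy model: software stores the current host values into a save area
 * before "VMRUN", and they are reloaded from that area on "#VMEXIT", so
 * the exit path needs no explicit register restores of its own.
 */
#include <stdio.h>
#include <string.h>

struct host_gprs {
        unsigned long rbx, rbp, r12, r13, r14, r15;
};

static struct host_gprs host_save_area;  /* stands in for the host save area page */

/* Swap type B, step 1: software saves the values before entering the guest. */
static void store_host_state(const struct host_gprs *cur)
{
        host_save_area = *cur;
}

/* Swap type B, step 2: "hardware" restores them when the guest exits. */
static void vmexit_restore(struct host_gprs *cur)
{
        *cur = host_save_area;
}

int main(void)
{
        struct host_gprs regs = { .rbx = 1, .r12 = 2 };

        store_host_state(&regs);
        memset(&regs, 0, sizeof(regs));  /* the guest run clobbers the GPRs */
        vmexit_restore(&regs);
        printf("rbx=%lu r12=%lu\n", regs.rbx, regs.r12);  /* prints rbx=1 r12=2 */
        return 0;
}

In the actual patch, the first step is the handful of mov instructions into
the SEV-ES host save area in __svm_sev_es_vcpu_run (see the vmenter.S hunks
below); the second step is done by the CPU on #VMEXIT.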

File tree

5 files changed (+57, -67 lines)

arch/x86/kvm/Makefile

Lines changed: 0 additions & 5 deletions
@@ -3,11 +3,6 @@
 ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
-ifeq ($(CONFIG_FRAME_POINTER),y)
-OBJECT_FILES_NON_STANDARD_vmx/vmenter.o := y
-OBJECT_FILES_NON_STANDARD_svm/vmenter.o := y
-endif
-
 include $(srctree)/virt/kvm/Makefile.kvm
 
 kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \

arch/x86/kvm/svm/sev.c

Lines changed: 1 addition & 1 deletion
@@ -434,7 +434,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
         /* Avoid using vmalloc for smaller buffers. */
         size = npages * sizeof(struct page *);
         if (size > PAGE_SIZE)
-                pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+                pages = __vmalloc(size, GFP_KERNEL_ACCOUNT);
         else
                 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
 

arch/x86/kvm/svm/svm.c

Lines changed: 10 additions & 7 deletions
@@ -1503,6 +1503,11 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
         __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
 
+static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
+{
+        return page_address(sd->save_area) + 0x400;
+}
+
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
@@ -1519,12 +1524,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
          * or subsequent vmload of host save area.
          */
         vmsave(sd->save_area_pa);
-        if (sev_es_guest(vcpu->kvm)) {
-                struct sev_es_save_area *hostsa;
-                hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-
-                sev_es_prepare_switch_to_guest(hostsa);
-        }
+        if (sev_es_guest(vcpu->kvm))
+                sev_es_prepare_switch_to_guest(sev_es_host_save_area(sd));
 
         if (tsc_scaling)
                 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
@@ -4101,14 +4102,16 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
 {
+        struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
         struct vcpu_svm *svm = to_svm(vcpu);
 
         guest_state_enter_irqoff();
 
         amd_clear_divider();
 
         if (sev_es_guest(vcpu->kvm))
-                __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+                __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
+                                      sev_es_host_save_area(sd));
         else
                 __svm_vcpu_run(svm, spec_ctrl_intercepted);
 

arch/x86/kvm/svm/svm.h

Lines changed: 2 additions & 1 deletion
@@ -698,7 +698,8 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
 
 /* vmenter.S */
 
-void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
+                           struct sev_es_save_area *hostsa);
 void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 
 #define DEFINE_KVM_GHCB_ACCESSORS(field) \

arch/x86/kvm/svm/vmenter.S

Lines changed: 44 additions & 53 deletions
@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
+#include <asm/frame.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include "kvm-asm-offsets.h"
@@ -67,7 +68,7 @@
                 "", X86_FEATURE_V_SPEC_CTRL
 901:
 .endm
-.macro RESTORE_HOST_SPEC_CTRL_BODY
+.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
 900:
         /* Same for after vmexit. */
         mov $MSR_IA32_SPEC_CTRL, %ecx
@@ -76,7 +77,7 @@
          * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
          * if it was not intercepted during guest execution.
          */
-        cmpb $0, (%_ASM_SP)
+        cmpb $0, \spec_ctrl_intercepted
         jnz 998f
         rdmsr
         movl %eax, SVM_spec_ctrl(%_ASM_DI)
@@ -99,6 +100,7 @@
  */
 SYM_FUNC_START(__svm_vcpu_run)
         push %_ASM_BP
+        mov %_ASM_SP, %_ASM_BP
 #ifdef CONFIG_X86_64
         push %r15
         push %r14
@@ -268,7 +270,7 @@ SYM_FUNC_START(__svm_vcpu_run)
         RET
 
         RESTORE_GUEST_SPEC_CTRL_BODY
-        RESTORE_HOST_SPEC_CTRL_BODY
+        RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
 
 10:     cmpb $0, _ASM_RIP(kvm_rebooting)
         jne 2b
@@ -290,66 +292,68 @@ SYM_FUNC_START(__svm_vcpu_run)
 
 SYM_FUNC_END(__svm_vcpu_run)
 
+#ifdef CONFIG_KVM_AMD_SEV
+
+
+#ifdef CONFIG_X86_64
+#define SEV_ES_GPRS_BASE 0x300
+#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
+#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
+#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
+#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
+#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
+#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
+#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
+#endif
+
 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
  * @svm: struct vcpu_svm *
  * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
-        push %_ASM_BP
-#ifdef CONFIG_X86_64
-        push %r15
-        push %r14
-        push %r13
-        push %r12
-#else
-        push %edi
-        push %esi
-#endif
-        push %_ASM_BX
+        FRAME_BEGIN
 
         /*
-         * Save variables needed after vmexit on the stack, in inverse
-         * order compared to when they are needed.
+         * Save non-volatile (callee-saved) registers to the host save area.
+         * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
+         * saved on VMRUN.
          */
+        mov %rbp, SEV_ES_RBP (%rdx)
+        mov %r15, SEV_ES_R15 (%rdx)
+        mov %r14, SEV_ES_R14 (%rdx)
+        mov %r13, SEV_ES_R13 (%rdx)
+        mov %r12, SEV_ES_R12 (%rdx)
+        mov %rbx, SEV_ES_RBX (%rdx)
 
-        /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
-        push %_ASM_ARG2
-
-        /* Save @svm. */
-        push %_ASM_ARG1
-
-.ifnc _ASM_ARG1, _ASM_DI
         /*
-         * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
-         * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
+         * Save volatile registers that hold arguments that are needed after
+         * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
          */
-        mov %_ASM_ARG1, %_ASM_DI
-.endif
+        mov %rdi, SEV_ES_RDI (%rdx)
+        mov %rsi, SEV_ES_RSI (%rdx)
 
-        /* Clobbers RAX, RCX, RDX. */
+        /* Clobbers RAX, RCX, RDX (@hostsa). */
         RESTORE_GUEST_SPEC_CTRL
 
         /* Get svm->current_vmcb->pa into RAX. */
-        mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
-        mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+        mov SVM_current_vmcb(%rdi), %rax
+        mov KVM_VMCB_pa(%rax), %rax
 
         /* Enter guest mode */
         sti
 
-1:      vmrun %_ASM_AX
+1:      vmrun %rax
 
 2:      cli
 
-        /* Pop @svm to RDI, guest registers have been saved already. */
-        pop %_ASM_DI
-
 #ifdef CONFIG_MITIGATION_RETPOLINE
         /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+        FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
 
-        /* Clobbers RAX, RCX, RDX. */
+        /* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
         RESTORE_HOST_SPEC_CTRL
 
         /*
@@ -361,30 +365,17 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
          */
         UNTRAIN_RET_VM
 
-        /* "Pop" @spec_ctrl_intercepted. */
-        pop %_ASM_BX
-
-        pop %_ASM_BX
-
-#ifdef CONFIG_X86_64
-        pop %r12
-        pop %r13
-        pop %r14
-        pop %r15
-#else
-        pop %esi
-        pop %edi
-#endif
-        pop %_ASM_BP
+        FRAME_END
         RET
 
         RESTORE_GUEST_SPEC_CTRL_BODY
-        RESTORE_HOST_SPEC_CTRL_BODY
+        RESTORE_HOST_SPEC_CTRL_BODY %sil
 
-3:      cmpb $0, _ASM_RIP(kvm_rebooting)
+3:      cmpb $0, kvm_rebooting(%rip)
         jne 2b
         ud2
 
         _ASM_EXTABLE(1b, 3b)
 
 SYM_FUNC_END(__svm_sev_es_vcpu_run)
+#endif /* CONFIG_KVM_AMD_SEV */
