@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
+#include <asm/frame.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include "kvm-asm-offsets.h"
@@ -67,16 +68,16 @@
 		"", X86_FEATURE_V_SPEC_CTRL
 901:
 .endm
-.macro RESTORE_HOST_SPEC_CTRL_BODY
+.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
 900:
 	/* Same for after vmexit. */
 	mov $MSR_IA32_SPEC_CTRL, %ecx
 
 	/*
 	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
 	 * if it was not intercepted during guest execution.
 	 */
-	cmpb $0, (%_ASM_SP)
+	cmpb $0, \spec_ctrl_intercepted
 	jnz 998f
 	rdmsr
 	movl %eax, SVM_spec_ctrl(%_ASM_DI)
@@ -99,6 +100,7 @@
  */
 SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BP
+	mov %_ASM_SP, %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
 	push %r14
@@ -268,7 +270,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	RET
 
 	RESTORE_GUEST_SPEC_CTRL_BODY
-	RESTORE_HOST_SPEC_CTRL_BODY
+	RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
 
 10:	cmpb $0, _ASM_RIP(kvm_rebooting)
 	jne 2b
@@ -290,66 +292,68 @@ SYM_FUNC_START(__svm_vcpu_run)
 
 SYM_FUNC_END(__svm_vcpu_run)
 
+#ifdef CONFIG_KVM_AMD_SEV
+
+
+#ifdef CONFIG_X86_64
+#define SEV_ES_GPRS_BASE 0x300
+#define SEV_ES_RBX	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
+#define SEV_ES_RBP	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_RSI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
+#define SEV_ES_RDI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
+#define SEV_ES_R12	(SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
+#define SEV_ES_R13	(SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
+#define SEV_ES_R14	(SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
+#define SEV_ES_R15	(SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
+#endif
+
 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
  * @svm:	struct vcpu_svm *
  * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
-	push %_ASM_BP
-#ifdef CONFIG_X86_64
-	push %r15
-	push %r14
-	push %r13
-	push %r12
-#else
-	push %edi
-	push %esi
-#endif
-	push %_ASM_BX
+	FRAME_BEGIN
 
 	/*
-	 * Save variables needed after vmexit on the stack, in inverse
-	 * order compared to when they are needed.
+	 * Save non-volatile (callee-saved) registers to the host save area.
+	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
+	 * saved on VMRUN.
 	 */
+	mov %rbp, SEV_ES_RBP (%rdx)
+	mov %r15, SEV_ES_R15 (%rdx)
+	mov %r14, SEV_ES_R14 (%rdx)
+	mov %r13, SEV_ES_R13 (%rdx)
+	mov %r12, SEV_ES_R12 (%rdx)
+	mov %rbx, SEV_ES_RBX (%rdx)
 
-	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
-	push %_ASM_ARG2
-
-	/* Save @svm. */
-	push %_ASM_ARG1
-
-.ifnc _ASM_ARG1, _ASM_DI
 	/*
-	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
-	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
+	 * Save volatile registers that hold arguments that are needed after
+	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
 	 */
-	mov %_ASM_ARG1, %_ASM_DI
-.endif
+	mov %rdi, SEV_ES_RDI (%rdx)
+	mov %rsi, SEV_ES_RSI (%rdx)
 
-	/* Clobbers RAX, RCX, RDX. */
+	/* Clobbers RAX, RCX, RDX (@hostsa). */
 	RESTORE_GUEST_SPEC_CTRL
 
 	/* Get svm->current_vmcb->pa into RAX. */
-	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
-	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+	mov SVM_current_vmcb(%rdi), %rax
+	mov KVM_VMCB_pa(%rax), %rax
 
 	/* Enter guest mode */
 	sti
 
-1:	vmrun %_ASM_AX
+1:	vmrun %rax
 
 2:	cli
 
-	/* Pop @svm to RDI, guest registers have been saved already. */
-	pop %_ASM_DI
-
 #ifdef CONFIG_MITIGATION_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
 
-	/* Clobbers RAX, RCX, RDX. */
+	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
 	RESTORE_HOST_SPEC_CTRL
 
 	/*
@@ -361,30 +365,17 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	 */
 	UNTRAIN_RET_VM
 
-	/* "Pop" @spec_ctrl_intercepted. */
-	pop %_ASM_BX
-
-	pop %_ASM_BX
-
-#ifdef CONFIG_X86_64
-	pop %r12
-	pop %r13
-	pop %r14
-	pop %r15
-#else
-	pop %esi
-	pop %edi
-#endif
-	pop %_ASM_BP
+	FRAME_END
 	RET
 
 	RESTORE_GUEST_SPEC_CTRL_BODY
-	RESTORE_HOST_SPEC_CTRL_BODY
+	RESTORE_HOST_SPEC_CTRL_BODY %sil
 
-3:	cmpb $0, _ASM_RIP(kvm_rebooting)
+3:	cmpb $0, kvm_rebooting(%rip)
 	jne 2b
 	ud2
 
 	_ASM_EXTABLE(1b, 3b)
 
 SYM_FUNC_END(__svm_sev_es_vcpu_run)
+#endif /* CONFIG_KVM_AMD_SEV */
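
For reference, here is a minimal compile-and-run sketch of how the SEV_ES_* byte offsets added above work out. It assumes the register indices match KVM's __VCPU_REGS_* enum order (RAX=0, RCX=1, RDX=2, RBX=3, ...), and that @hostsa arrives in RDX per the x86-64 SysV calling convention, as the "(@hostsa)" comment in the diff suggests; the helper names and the test program itself are illustrative and not part of the patch.

/*
 * Sketch only: re-derives the SEV_ES_* offsets used by the new
 * __svm_sev_es_vcpu_run() prologue. The register indices below assume
 * KVM's __VCPU_REGS_* ordering; everything else is illustrative.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define WORD_SIZE		8	/* one GPR slot on x86-64 */

/* Register indices, in the same order KVM uses for __VCPU_REGS_*. */
enum { VCPU_RAX, VCPU_RCX, VCPU_RDX, VCPU_RBX, VCPU_RSP, VCPU_RBP,
       VCPU_RSI, VCPU_RDI, VCPU_R8, VCPU_R9, VCPU_R10, VCPU_R11,
       VCPU_R12, VCPU_R13, VCPU_R14, VCPU_R15 };

#define SEV_ES_GPRS_BASE	0x300
#define SEV_ES_GPR(idx)		(SEV_ES_GPRS_BASE + (idx) * WORD_SIZE)

int main(void)
{
	/* The slots the assembly stores into, relative to @hostsa (RDX). */
	static const struct { const char *name; int idx; } gprs[] = {
		{ "SEV_ES_RBX", VCPU_RBX }, { "SEV_ES_RBP", VCPU_RBP },
		{ "SEV_ES_RSI", VCPU_RSI }, { "SEV_ES_RDI", VCPU_RDI },
		{ "SEV_ES_R12", VCPU_R12 }, { "SEV_ES_R13", VCPU_R13 },
		{ "SEV_ES_R14", VCPU_R14 }, { "SEV_ES_R15", VCPU_R15 },
	};

	/* e.g. RBX -> 0x318, RBP -> 0x328, R12 -> 0x360 */
	for (size_t i = 0; i < sizeof(gprs) / sizeof(gprs[0]); i++)
		printf("%s = %#x\n", gprs[i].name, SEV_ES_GPR(gprs[i].idx));

	/* Spot-check a couple of slots. */
	assert(SEV_ES_GPR(VCPU_RBX) == 0x318);
	assert(SEV_ES_GPR(VCPU_R15) == 0x378);
	return 0;
}

The point of the new contract is visible in the arithmetic: instead of pushing callee-saved GPRs onto the stack, the prologue writes them to fixed offsets in the host save area, from which hardware restores them on #VMEXIT.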