Skip to content

Commit 55e6f8f

Browse files
committed
Merge tag 'kvm-x86-svm-6.12' of https://github.com/kvm-x86/linux into HEAD
KVM SVM changes for 6.12:

- Don't stuff the RSB after VM-Exit when RETPOLINE=y and AutoIBRS is enabled, i.e. when the CPU has already flushed the RSB.
- Trace the per-CPU host save area as a VMCB pointer to improve readability, and clean up the retrieval of the SEV-ES host save area.
- Remove unnecessary accounting of temporary allocations related to nested VMCBs.
2 parents 43d97b2 + 4440337 commit 55e6f8f

File tree

5 files changed

+47
-27
lines changed

5 files changed

+47
-27
lines changed

arch/x86/include/asm/svm.h

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -516,6 +516,20 @@ struct ghcb {
516516
u32 ghcb_usage;
517517
} __packed;
518518

519+
struct vmcb {
520+
struct vmcb_control_area control;
521+
union {
522+
struct vmcb_save_area save;
523+
524+
/*
525+
* For SEV-ES VMs, the save area in the VMCB is used only to
526+
* save/load host state. Guest state resides in a separate
527+
* page, the aptly named VM Save Area (VMSA), that is encrypted
528+
* with the guest's private key.
529+
*/
530+
struct sev_es_save_area host_sev_es_save;
531+
};
532+
} __packed;
519533

520534
#define EXPECTED_VMCB_SAVE_AREA_SIZE 744
521535
#define EXPECTED_GHCB_SAVE_AREA_SIZE 1032
@@ -532,6 +546,7 @@ static inline void __unused_size_checks(void)
532546
BUILD_BUG_ON(sizeof(struct ghcb_save_area) != EXPECTED_GHCB_SAVE_AREA_SIZE);
533547
BUILD_BUG_ON(sizeof(struct sev_es_save_area) != EXPECTED_SEV_ES_SAVE_AREA_SIZE);
534548
BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
549+
BUILD_BUG_ON(offsetof(struct vmcb, save) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
535550
BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);
536551

537552
/* Check offsets of reserved fields */
@@ -568,11 +583,6 @@ static inline void __unused_size_checks(void)
568583
BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0);
569584
}
570585

571-
struct vmcb {
572-
struct vmcb_control_area control;
573-
struct vmcb_save_area save;
574-
} __packed;
575-
576586
#define SVM_CPUID_FUNC 0x8000000a
577587

578588
#define SVM_SELECTOR_S_SHIFT 4

arch/x86/kvm/svm/nested.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1693,8 +1693,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
16931693
return -EINVAL;
16941694

16951695
ret = -ENOMEM;
1696-
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1697-
save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1696+
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1697+
save = kzalloc(sizeof(*save), GFP_KERNEL);
16981698
if (!ctl || !save)
16991699
goto out_free;
17001700

arch/x86/kvm/svm/svm.c

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -573,7 +573,7 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
573573

574574
static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
575575
{
576-
return page_address(sd->save_area) + 0x400;
576+
return &sd->save_area->host_sev_es_save;
577577
}
578578

579579
static inline void kvm_cpu_svm_disable(void)
@@ -696,31 +696,32 @@ static void svm_cpu_uninit(int cpu)
696696
return;
697697

698698
kfree(sd->sev_vmcbs);
699-
__free_page(sd->save_area);
699+
__free_page(__sme_pa_to_page(sd->save_area_pa));
700700
sd->save_area_pa = 0;
701701
sd->save_area = NULL;
702702
}
703703

704704
static int svm_cpu_init(int cpu)
705705
{
706706
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
707+
struct page *save_area_page;
707708
int ret = -ENOMEM;
708709

709710
memset(sd, 0, sizeof(struct svm_cpu_data));
710-
sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
711-
if (!sd->save_area)
711+
save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
712+
if (!save_area_page)
712713
return ret;
713714

714715
ret = sev_cpu_init(sd);
715716
if (ret)
716717
goto free_save_area;
717718

718-
sd->save_area_pa = __sme_page_pa(sd->save_area);
719+
sd->save_area = page_address(save_area_page);
720+
sd->save_area_pa = __sme_page_pa(save_area_page);
719721
return 0;
720722

721723
free_save_area:
722-
__free_page(sd->save_area);
723-
sd->save_area = NULL;
724+
__free_page(save_area_page);
724725
return ret;
725726

726727
}
@@ -1124,8 +1125,7 @@ static void svm_hardware_unsetup(void)
11241125
for_each_possible_cpu(cpu)
11251126
svm_cpu_uninit(cpu);
11261127

1127-
__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
1128-
get_order(IOPM_SIZE));
1128+
__free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
11291129
iopm_base = 0;
11301130
}
11311131

@@ -1301,7 +1301,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
13011301
if (!kvm_hlt_in_guest(vcpu->kvm))
13021302
svm_set_intercept(svm, INTERCEPT_HLT);
13031303

1304-
control->iopm_base_pa = __sme_set(iopm_base);
1304+
control->iopm_base_pa = iopm_base;
13051305
control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
13061306
control->int_ctl = V_INTR_MASKING_MASK;
13071307

@@ -1503,7 +1503,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
15031503

15041504
sev_free_vcpu(vcpu);
15051505

1506-
__free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
1506+
__free_page(__sme_pa_to_page(svm->vmcb01.pa));
15071507
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
15081508
}
15091509

@@ -5281,7 +5281,7 @@ static __init int svm_hardware_setup(void)
52815281

52825282
iopm_va = page_address(iopm_pages);
52835283
memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
5284-
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
5284+
iopm_base = __sme_page_pa(iopm_pages);
52855285

52865286
init_msrpm_offsets();
52875287

arch/x86/kvm/svm/svm.h

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,21 @@
2525
#include "cpuid.h"
2626
#include "kvm_cache_regs.h"
2727

28-
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
28+
/*
29+
* Helpers to convert to/from physical addresses for pages whose address is
30+
* consumed directly by hardware. Even though it's a physical address, SVM
31+
* often restricts the address to the natural width, hence 'unsigned long'
32+
* instead of 'hpa_t'.
33+
*/
34+
static inline unsigned long __sme_page_pa(struct page *page)
35+
{
36+
return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
37+
}
38+
39+
static inline struct page *__sme_pa_to_page(unsigned long pa)
40+
{
41+
return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
42+
}
2943

3044
#define IOPM_SIZE PAGE_SIZE * 3
3145
#define MSRPM_SIZE PAGE_SIZE * 2
@@ -321,7 +335,7 @@ struct svm_cpu_data {
321335
u32 next_asid;
322336
u32 min_asid;
323337

324-
struct page *save_area;
338+
struct vmcb *save_area;
325339
unsigned long save_area_pa;
326340

327341
struct vmcb *current_vmcb;

arch/x86/kvm/svm/vmenter.S

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -209,10 +209,8 @@ SYM_FUNC_START(__svm_vcpu_run)
209209
7: vmload %_ASM_AX
210210
8:
211211

212-
#ifdef CONFIG_MITIGATION_RETPOLINE
213212
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
214-
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
215-
#endif
213+
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
216214

217215
/* Clobbers RAX, RCX, RDX. */
218216
RESTORE_HOST_SPEC_CTRL
@@ -348,10 +346,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
348346

349347
2: cli
350348

351-
#ifdef CONFIG_MITIGATION_RETPOLINE
352349
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
353-
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
354-
#endif
350+
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
355351

356352
/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
357353
RESTORE_HOST_SPEC_CTRL

0 commit comments

Comments (0)