Skip to content

Commit 08b0644

Browse files
committed
Merge tag 'x86_urgent_for_v6.1_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - ioremap: mask out the bits which are not part of the physical address
   *after* the size computation is done to prevent any hypothetical
   ioremap failures

 - Change the MSR save/restore functionality during suspend to rely on
   flags denoting that the related MSRs are actually supported vs
   reading them and assuming they are (an Atom one allows reading but
   not writing, thus breaking this scheme at resume time)

 - prevent IV reuse in the AES-GCM communication scheme between SNP
   guests and the AMD secure processor

* tag 'x86_urgent_for_v6.1_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/ioremap: Fix page aligned size calculation in __ioremap_caller()
  x86/pm: Add enumeration check before spec MSRs save/restore setup
  x86/tsx: Add a feature bit for TSX control MSR support
  virt/sev-guest: Prevent IV reuse in the SNP guest driver
2 parents 5afcab2 + 4dbd6a3 commit 08b0644

File tree

5 files changed

+112
-44
lines changed

5 files changed

+112
-44
lines changed

arch/x86/include/asm/cpufeatures.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -305,6 +305,9 @@
 #define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
 #define X86_FEATURE_RSB_VMEXIT_LITE	(11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
 
+
+#define X86_FEATURE_MSR_TSX_CTRL	(11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */

arch/x86/kernel/cpu/tsx.c

Lines changed: 17 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -58,24 +58,6 @@ static void tsx_enable(void)
 	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-static bool tsx_ctrl_is_supported(void)
-{
-	u64 ia32_cap = x86_read_arch_cap_msr();
-
-	/*
-	 * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
-	 * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
-	 *
-	 * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
-	 * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
-	 * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
-	 * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
-	 * tsx= cmdline requests will do nothing on CPUs without
-	 * MSR_IA32_TSX_CTRL support.
-	 */
-	return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
-}
-
 static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
 {
 	if (boot_cpu_has_bug(X86_BUG_TAA))
@@ -135,7 +117,7 @@ static void tsx_clear_cpuid(void)
 		rdmsrl(MSR_TSX_FORCE_ABORT, msr);
 		msr |= MSR_TFA_TSX_CPUID_CLEAR;
 		wrmsrl(MSR_TSX_FORCE_ABORT, msr);
-	} else if (tsx_ctrl_is_supported()) {
+	} else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) {
 		rdmsrl(MSR_IA32_TSX_CTRL, msr);
 		msr |= TSX_CTRL_CPUID_CLEAR;
 		wrmsrl(MSR_IA32_TSX_CTRL, msr);
@@ -158,7 +140,8 @@ static void tsx_dev_mode_disable(void)
 	u64 mcu_opt_ctrl;
 
 	/* Check if RTM_ALLOW exists */
-	if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
+	if (!boot_cpu_has_bug(X86_BUG_TAA) ||
+	    !cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL) ||
 	    !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
 		return;
 
@@ -191,7 +174,20 @@ void __init tsx_init(void)
 		return;
 	}
 
-	if (!tsx_ctrl_is_supported()) {
+	/*
+	 * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+	 * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+	 *
+	 * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+	 * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+	 * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+	 * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+	 * tsx= cmdline requests will do nothing on CPUs without
+	 * MSR_IA32_TSX_CTRL support.
+	 */
+	if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR) {
+		setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
+	} else {
 		tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
 		return;
 	}

arch/x86/mm/ioremap.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -217,9 +217,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 	 * Mappings have to be page-aligned
 	 */
 	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PHYSICAL_PAGE_MASK;
+	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
+	/*
+	 * Mask out any bits not part of the actual physical
+	 * address, like memory encryption bits.
+	 */
+	phys_addr &= PHYSICAL_PAGE_MASK;
+
 	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
 						pcm, &new_pcm);
 	if (retval) {

arch/x86/power/cpu.c

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -513,16 +513,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
 
 static void pm_save_spec_msr(void)
 {
-	u32 spec_msr_id[] = {
-		MSR_IA32_SPEC_CTRL,
-		MSR_IA32_TSX_CTRL,
-		MSR_TSX_FORCE_ABORT,
-		MSR_IA32_MCU_OPT_CTRL,
-		MSR_AMD64_LS_CFG,
-		MSR_AMD64_DE_CFG,
+	struct msr_enumeration {
+		u32 msr_no;
+		u32 feature;
+	} msr_enum[] = {
+		{ MSR_IA32_SPEC_CTRL,	 X86_FEATURE_MSR_SPEC_CTRL },
+		{ MSR_IA32_TSX_CTRL,	 X86_FEATURE_MSR_TSX_CTRL },
+		{ MSR_TSX_FORCE_ABORT,	 X86_FEATURE_TSX_FORCE_ABORT },
+		{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
+		{ MSR_AMD64_LS_CFG,	 X86_FEATURE_LS_CFG_SSBD },
+		{ MSR_AMD64_DE_CFG,	 X86_FEATURE_LFENCE_RDTSC },
 	};
+	int i;
 
-	msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+	for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
+		if (boot_cpu_has(msr_enum[i].feature))
+			msr_build_context(&msr_enum[i].msr_no, 1);
+	}
 }
 
 static int pm_check_save_msr(void)

drivers/virt/coco/sev-guest/sev-guest.c

Lines changed: 70 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -67,8 +67,27 @@ static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
 	return true;
 }
 
+/*
+ * If an error is received from the host or AMD Secure Processor (ASP) there
+ * are two options. Either retry the exact same encrypted request or discontinue
+ * using the VMPCK.
+ *
+ * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
+ * encrypt the requests. The IV for this scheme is the sequence number. GCM
+ * cannot tolerate IV reuse.
+ *
+ * The ASP FW v1.51 only increments the sequence numbers on a successful
+ * guest<->ASP back and forth and only accepts messages at its exact sequence
+ * number.
+ *
+ * So if the sequence number were to be reused the encryption scheme is
+ * vulnerable. If the sequence number were incremented for a fresh IV the ASP
+ * will reject the request.
+ */
 static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
 {
+	dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
+		  vmpck_id);
 	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
 	snp_dev->vmpck = NULL;
 }
@@ -321,34 +340,71 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
 	if (rc)
 		return rc;
 
-	/* Call firmware to process the request */
+	/*
+	 * Call firmware to process the request. In this function the encrypted
+	 * message enters shared memory with the host. So after this call the
+	 * sequence number must be incremented or the VMPCK must be deleted to
+	 * prevent reuse of the IV.
+	 */
 	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+
+	/*
+	 * If the extended guest request fails due to having too small of a
+	 * certificate data buffer, retry the same guest request without the
+	 * extended data request in order to increment the sequence number
+	 * and thus avoid IV reuse.
+	 */
+	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
+	    err == SNP_GUEST_REQ_INVALID_LEN) {
+		const unsigned int certs_npages = snp_dev->input.data_npages;
+
+		exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+
+		/*
+		 * If this call to the firmware succeeds, the sequence number can
+		 * be incremented allowing for continued use of the VMPCK. If
+		 * there is an error reflected in the return value, this value
+		 * is checked further down and the result will be the deletion
+		 * of the VMPCK and the error code being propagated back to the
+		 * user as an ioctl() return code.
+		 */
+		rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+
+		/*
+		 * Override the error to inform callers the given extended
+		 * request buffer size was too small and give the caller the
+		 * required buffer size.
+		 */
+		err = SNP_GUEST_REQ_INVALID_LEN;
+		snp_dev->input.data_npages = certs_npages;
+	}
+
 	if (fw_err)
 		*fw_err = err;
 
-	if (rc)
-		return rc;
+	if (rc) {
+		dev_alert(snp_dev->dev,
+			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
+			  rc, *fw_err);
+		goto disable_vmpck;
+	}
 
-	/*
-	 * The verify_and_dec_payload() will fail only if the hypervisor is
-	 * actively modifying the message header or corrupting the encrypted payload.
-	 * This hints that hypervisor is acting in a bad faith. Disable the VMPCK so that
-	 * the key cannot be used for any communication. The key is disabled to ensure
-	 * that AES-GCM does not use the same IV while encrypting the request payload.
-	 */
 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
 	if (rc) {
 		dev_alert(snp_dev->dev,
-			  "Detected unexpected decode failure, disabling the vmpck_id %d\n",
-			  vmpck_id);
-		snp_disable_vmpck(snp_dev);
-		return rc;
+			  "Detected unexpected decode failure from ASP. rc: %d\n",
+			  rc);
+		goto disable_vmpck;
 	}
 
 	/* Increment to new message sequence after payload decryption was successful. */
 	snp_inc_msg_seqno(snp_dev);
 
 	return 0;
+
+disable_vmpck:
+	snp_disable_vmpck(snp_dev);
+	return rc;
 }
 
 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)

0 commit comments

Comments
 (0)