
Commit 6613d82

pa1gupta authored and hansendc committed
x86/bugs: Use ALTERNATIVE() instead of mds_user_clear static key
The VERW mitigation at exit-to-user is enabled via a static branch
mds_user_clear. This static branch is never toggled after boot, and can
be safely replaced with an ALTERNATIVE() which is convenient to use in
asm.

Switch to ALTERNATIVE() to use the VERW mitigation late in exit-to-user
path. Also remove the now redundant VERW in exc_nmi() and
arch_exit_to_user_mode().

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20240213-delay-verw-v8-4-a6216d83edb7%40linux.intel.com
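For context, the ALTERNATIVE()-based VERW helper this message refers to was added by an earlier patch in the same series; a rough sketch (the exact macro body shown here is an approximation, not part of this commit):

    /* arch/x86/include/asm/nospec-branch.h -- sketch of the asm helper */
    .macro CLEAR_CPU_BUFFERS
        /* Boot-time alternative patching turns this from nothing into
         * "verw mds_verw_sel" when X86_FEATURE_CLEAR_CPU_BUF is set;
         * VERW clobbers only CFLAGS.ZF. */
        ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
    .endm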
1 parent a0e2dab commit 6613d82

6 files changed: +34 −37 lines

Documentation/arch/x86/mds.rst

Lines changed: 27 additions & 11 deletions
@@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
 
     mds_clear_cpu_buffers()
 
+Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
+Other than CFLAGS.ZF, this macro doesn't clobber any registers.
+
 The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
 (idle) transitions.
 
@@ -138,17 +141,30 @@ Mitigation points
 
    When transitioning from kernel to user space the CPU buffers are flushed
    on affected CPUs when the mitigation is not disabled on the kernel
-   command line. The migitation is enabled through the static key
-   mds_user_clear.
-
-   The mitigation is invoked in prepare_exit_to_usermode() which covers
-   all but one of the kernel to user space transitions. The exception
-   is when we return from a Non Maskable Interrupt (NMI), which is
-   handled directly in do_nmi().
-
-   (The reason that NMI is special is that prepare_exit_to_usermode() can
-   enable IRQs. In NMI context, NMIs are blocked, and we don't want to
-   enable IRQs with NMIs blocked.)
+   command line. The mitigation is enabled through the feature flag
+   X86_FEATURE_CLEAR_CPU_BUF.
+
+   The mitigation is invoked just before transitioning to userspace after
+   user registers are restored. This is done to minimize the window in
+   which kernel data could be accessed after VERW e.g. via an NMI after
+   VERW.
+
+   **Corner case not handled**
+   Interrupts returning to kernel don't clear CPUs buffers since the
+   exit-to-user path is expected to do that anyways. But, there could be
+   a case when an NMI is generated in kernel after the exit-to-user path
+   has cleared the buffers. This case is not handled and NMI returning to
+   kernel don't clear CPU buffers because:
+
+   1. It is rare to get an NMI after VERW, but before returning to userspace.
+   2. For an unprivileged user, there is no known way to make that NMI
+      less rare or target it.
+   3. It would take a large number of these precisely-timed NMIs to mount
+      an actual attack. There's presumably not enough bandwidth.
+   4. The NMI in question occurs after a VERW, i.e. when user state is
+      restored and most interesting data is already scrubbed. Whats left
+      is only the data that NMI touches, and that may or may not be of
+      any interest.
 
 
 2. C-State transition
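
To make the "just before transitioning to userspace" placement described above concrete, a companion patch in this series uses the macro right before the return-to-user instruction, roughly like this (sketch, exact surrounding code not part of this commit):

    /* arch/x86/entry/entry_64.S, SYSRET exit path (sketch) */
    swapgs
    CLEAR_CPU_BUFFERS    /* VERW after user registers have been restored */
    sysretq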

arch/x86/include/asm/entry-common.h

Lines changed: 0 additions & 1 deletion
@@ -91,7 +91,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 
 static __always_inline void arch_exit_to_user_mode(void)
 {
-	mds_user_clear_cpu_buffers();
 	amd_clear_divider();
 }
 #define arch_exit_to_user_mode arch_exit_to_user_mode

arch/x86/include/asm/nospec-branch.h

Lines changed: 0 additions & 12 deletions
@@ -540,7 +540,6 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-DECLARE_STATIC_KEY_FALSE(mds_user_clear);
 DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
@@ -574,17 +573,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
 	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
 }
 
-/**
- * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
- *
- * Clear CPU buffers if the corresponding static key is enabled
- */
-static __always_inline void mds_user_clear_cpu_buffers(void)
-{
-	if (static_branch_likely(&mds_user_clear))
-		mds_clear_cpu_buffers();
-}
-
 /**
  * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
  *

arch/x86/kernel/cpu/bugs.c

Lines changed: 6 additions & 9 deletions
@@ -111,9 +111,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 /* Control unconditional IBPB in switch_mm() */
 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-/* Control MDS CPU buffer clear before returning to user space */
-DEFINE_STATIC_KEY_FALSE(mds_user_clear);
-EXPORT_SYMBOL_GPL(mds_user_clear);
 /* Control MDS CPU buffer clear before idling (halt, mwait) */
 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
 EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -252,7 +249,7 @@ static void __init mds_select_mitigation(void)
 	if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
 		mds_mitigation = MDS_MITIGATION_VMWERV;
 
-	static_branch_enable(&mds_user_clear);
+	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 
 	if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
 	    (mds_nosmt || cpu_mitigations_auto_nosmt()))
@@ -356,7 +353,7 @@ static void __init taa_select_mitigation(void)
 	 * For guests that can't determine whether the correct microcode is
 	 * present on host, enable the mitigation for UCODE_NEEDED as well.
 	 */
-	static_branch_enable(&mds_user_clear);
+	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 
 	if (taa_nosmt || cpu_mitigations_auto_nosmt())
 		cpu_smt_disable(false);
@@ -424,7 +421,7 @@ static void __init mmio_select_mitigation(void)
 	 */
 	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
 					      boot_cpu_has(X86_FEATURE_RTM)))
-		static_branch_enable(&mds_user_clear);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 	else
 		static_branch_enable(&mmio_stale_data_clear);
 
@@ -484,12 +481,12 @@ static void __init md_clear_update_mitigation(void)
 	if (cpu_mitigations_off())
 		return;
 
-	if (!static_key_enabled(&mds_user_clear))
+	if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
 		goto out;
 
 	/*
-	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
-	 * mitigation, if necessary.
+	 * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
+	 * Stale Data mitigation, if necessary.
 	 */
 	if (mds_mitigation == MDS_MITIGATION_OFF &&
 	    boot_cpu_has_bug(X86_BUG_MDS)) {
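
Net effect of the bugs.c changes, as a rough before/after sketch (assuming mitigation selection runs before boot-time alternative patching, as it does today):

    /* Before: toggle a static key that C code tested at runtime */
    static_branch_enable(&mds_user_clear);

    /* After: force the synthetic feature bit; alternative patching then
     * inserts VERW at CLEAR_CPU_BUFFERS sites, and C code can still test
     * it via cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF), as in the
     * vmx.c hunk below. */
    setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);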

arch/x86/kernel/nmi.c

Lines changed: 0 additions & 3 deletions
@@ -563,9 +563,6 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 	}
 	if (this_cpu_dec_return(nmi_state))
 		goto nmi_restart;
-
-	if (user_mode(regs))
-		mds_user_clear_cpu_buffers();
 }
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion
@@ -7227,7 +7227,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	/* L1D Flush includes CPU buffer clear to mitigate MDS */
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
-	else if (static_branch_unlikely(&mds_user_clear))
+	else if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF))
 		mds_clear_cpu_buffers();
 	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
 		 kvm_arch_has_assigned_device(vcpu->kvm))
