Skip to content

Commit ecf9b7b

Browse files
committed
Merge tag 'x86_core_for_v6.0_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 core updates from Borislav Petkov:

 - Have invalid MSR accesses warnings appear only once after a
   pr_warn_once() change broke that

 - Simplify {JMP,CALL}_NOSPEC and let the objtool retpoline patching
   infra take care of them instead of having unreadable alternative
   macros there

* tag 'x86_core_for_v6.0_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/extable: Fix ex_handler_msr() print condition
  x86,nospec: Simplify {JMP,CALL}_NOSPEC
2 parents 98b1783 + a1a5482 commit ecf9b7b

File tree

3 files changed

+43
-17
lines changed

3 files changed

+43
-17
lines changed

arch/x86/include/asm/nospec-branch.h

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -93,26 +93,38 @@
9393
#endif
9494
.endm
9595

96+
/*
97+
* Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
98+
* to the retpoline thunk with a CS prefix when the register requires
99+
* a RAX prefix byte to encode. Also see apply_retpolines().
100+
*/
101+
.macro __CS_PREFIX reg:req
102+
.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
103+
.ifc \reg,\rs
104+
.byte 0x2e
105+
.endif
106+
.endr
107+
.endm
108+
96109
/*
97110
* JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
98111
* indirect jmp/call which may be susceptible to the Spectre variant 2
99112
* attack.
100113
*/
101114
.macro JMP_NOSPEC reg:req
102115
#ifdef CONFIG_RETPOLINE
103-
ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
104-
__stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
105-
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
116+
__CS_PREFIX \reg
117+
jmp __x86_indirect_thunk_\reg
106118
#else
107119
jmp *%\reg
120+
int3
108121
#endif
109122
.endm
110123

111124
.macro CALL_NOSPEC reg:req
112125
#ifdef CONFIG_RETPOLINE
113-
ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
114-
__stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
115-
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
126+
__CS_PREFIX \reg
127+
call __x86_indirect_thunk_\reg
116128
#else
117129
call *%\reg
118130
#endif

arch/x86/mm/extable.c

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -94,16 +94,18 @@ static bool ex_handler_copy(const struct exception_table_entry *fixup,
9494
static bool ex_handler_msr(const struct exception_table_entry *fixup,
9595
struct pt_regs *regs, bool wrmsr, bool safe, int reg)
9696
{
97-
if (!safe && wrmsr &&
98-
pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
99-
(unsigned int)regs->cx, (unsigned int)regs->dx,
100-
(unsigned int)regs->ax, regs->ip, (void *)regs->ip))
97+
if (__ONCE_LITE_IF(!safe && wrmsr)) {
98+
pr_warn("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
99+
(unsigned int)regs->cx, (unsigned int)regs->dx,
100+
(unsigned int)regs->ax, regs->ip, (void *)regs->ip);
101101
show_stack_regs(regs);
102+
}
102103

103-
if (!safe && !wrmsr &&
104-
pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
105-
(unsigned int)regs->cx, regs->ip, (void *)regs->ip))
104+
if (__ONCE_LITE_IF(!safe && !wrmsr)) {
105+
pr_warn("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
106+
(unsigned int)regs->cx, regs->ip, (void *)regs->ip);
106107
show_stack_regs(regs);
108+
}
107109

108110
if (!wrmsr) {
109111
/* Pretend that the read succeeded and returned 0. */

include/linux/once_lite.h

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,15 +9,27 @@
99
*/
1010
#define DO_ONCE_LITE(func, ...) \
1111
DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__)
12-
#define DO_ONCE_LITE_IF(condition, func, ...) \
12+
13+
#define __ONCE_LITE_IF(condition) \
1314
({ \
1415
static bool __section(".data.once") __already_done; \
15-
bool __ret_do_once = !!(condition); \
16+
bool __ret_cond = !!(condition); \
17+
bool __ret_once = false; \
1618
\
17-
if (unlikely(__ret_do_once && !__already_done)) { \
19+
if (unlikely(__ret_cond && !__already_done)) { \
1820
__already_done = true; \
19-
func(__VA_ARGS__); \
21+
__ret_once = true; \
2022
} \
23+
unlikely(__ret_once); \
24+
})
25+
26+
#define DO_ONCE_LITE_IF(condition, func, ...) \
27+
({ \
28+
bool __ret_do_once = !!(condition); \
29+
\
30+
if (__ONCE_LITE_IF(__ret_do_once)) \
31+
func(__VA_ARGS__); \
32+
\
2133
unlikely(__ret_do_once); \
2234
})
2335

0 commit comments

Comments (0)