Skip to content

Commit 5af5d43

Browse files
committed
Merge tag 'x86_misc_for_6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc x86 updates from Dave Hansen: "As usual for this branch, these are super random: a compile fix for some newish LLVM checks and making sure a Kconfig text reference to 'RSB' matches the normal definition: - Rework some CPU setup code to keep LLVM happy on 32-bit - Correct RSB terminology in Kconfig text" * tag 'x86_misc_for_6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/cpu: Make sure flag_is_changeable_p() is always being used x86/bugs: Correct RSB terminology in Kconfig
2 parents be9318c + 62e7244 commit 5af5d43

File tree

3 files changed

+31
-33
lines changed

3 files changed

+31
-33
lines changed

arch/x86/Kconfig

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2564,15 +2564,14 @@ config MITIGATION_CALL_DEPTH_TRACKING
25642564
default y
25652565
help
25662566
Compile the kernel with call depth tracking to mitigate the Intel
2567-
SKL Return-Speculation-Buffer (RSB) underflow issue. The
2568-
mitigation is off by default and needs to be enabled on the
2569-
kernel command line via the retbleed=stuff option. For
2570-
non-affected systems the overhead of this option is marginal as
2571-
the call depth tracking is using run-time generated call thunks
2572-
in a compiler generated padding area and call patching. This
2573-
increases text size by ~5%. For non affected systems this space
2574-
is unused. On affected SKL systems this results in a significant
2575-
performance gain over the IBRS mitigation.
2567+
SKL Return-Stack-Buffer (RSB) underflow issue. The mitigation is off
2568+
by default and needs to be enabled on the kernel command line via the
2569+
retbleed=stuff option. For non-affected systems the overhead of this
2570+
option is marginal as the call depth tracking is using run-time
2571+
generated call thunks in a compiler generated padding area and call
2572+
patching. This increases text size by ~5%. For non affected systems
2573+
this space is unused. On affected SKL systems this results in a
2574+
significant performance gain over the IBRS mitigation.
25762575

25772576
config CALL_THUNKS_DEBUG
25782577
bool "Enable call thunks and call depth tracking debugging"

arch/x86/include/asm/cpuid.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66
#ifndef _ASM_X86_CPUID_H
77
#define _ASM_X86_CPUID_H
88

9+
#include <linux/types.h>
10+
911
#include <asm/string.h>
1012

1113
struct cpuid_regs {
@@ -20,11 +22,11 @@ enum cpuid_regs_idx {
2022
};
2123

2224
#ifdef CONFIG_X86_32
23-
extern int have_cpuid_p(void);
25+
bool have_cpuid_p(void);
2426
#else
25-
static inline int have_cpuid_p(void)
27+
static inline bool have_cpuid_p(void)
2628
{
27-
return 1;
29+
return true;
2830
}
2931
#endif
3032
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,

arch/x86/kernel/cpu/common.c

Lines changed: 18 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -276,21 +276,13 @@ static int __init x86_noinvpcid_setup(char *s)
276276
}
277277
early_param("noinvpcid", x86_noinvpcid_setup);
278278

279-
#ifdef CONFIG_X86_32
280-
static int cachesize_override = -1;
281-
static int disable_x86_serial_nr = 1;
282-
283-
static int __init cachesize_setup(char *str)
284-
{
285-
get_option(&str, &cachesize_override);
286-
return 1;
287-
}
288-
__setup("cachesize=", cachesize_setup);
289-
290279
/* Standard macro to see if a specific flag is changeable */
291-
static inline int flag_is_changeable_p(u32 flag)
280+
static inline bool flag_is_changeable_p(unsigned long flag)
292281
{
293-
u32 f1, f2;
282+
unsigned long f1, f2;
283+
284+
if (!IS_ENABLED(CONFIG_X86_32))
285+
return true;
294286

295287
/*
296288
* Cyrix and IDT cpus allow disabling of CPUID
@@ -313,11 +305,22 @@ static inline int flag_is_changeable_p(u32 flag)
313305
: "=&r" (f1), "=&r" (f2)
314306
: "ir" (flag));
315307

316-
return ((f1^f2) & flag) != 0;
308+
return (f1 ^ f2) & flag;
317309
}
318310

311+
#ifdef CONFIG_X86_32
312+
static int cachesize_override = -1;
313+
static int disable_x86_serial_nr = 1;
314+
315+
static int __init cachesize_setup(char *str)
316+
{
317+
get_option(&str, &cachesize_override);
318+
return 1;
319+
}
320+
__setup("cachesize=", cachesize_setup);
321+
319322
/* Probe for the CPUID instruction */
320-
int have_cpuid_p(void)
323+
bool have_cpuid_p(void)
321324
{
322325
return flag_is_changeable_p(X86_EFLAGS_ID);
323326
}
@@ -349,10 +352,6 @@ static int __init x86_serial_nr_setup(char *s)
349352
}
350353
__setup("serialnumber", x86_serial_nr_setup);
351354
#else
352-
static inline int flag_is_changeable_p(u32 flag)
353-
{
354-
return 1;
355-
}
356355
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
357356
{
358357
}
@@ -1088,7 +1087,6 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
10881087

10891088
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
10901089
{
1091-
#ifdef CONFIG_X86_32
10921090
int i;
10931091

10941092
/*
@@ -1109,7 +1107,6 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
11091107
break;
11101108
}
11111109
}
1112-
#endif
11131110
}
11141111

11151112
#define NO_SPECULATION BIT(0)

0 commit comments

Comments (0)