Commit b58386a
Merge tag 'x86-boot-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 boot code updates from Ingo Molnar:

 - Memblock setup and other early boot code cleanups (Mike Rapoport)

 - Export e820_table_kexec[] to sysfs (Dave Young)

 - Baby steps of adding relocate_kernel() debugging support (David Woodhouse)

 - Replace open-coded parity calculation with parity8() (Kuan-Wei Chiu)

 - Move the LA57 trampoline to separate source file (Ard Biesheuvel)

 - Misc micro-optimizations (Uros Bizjak)

 - Drop obsolete E820_TYPE_RESERVED_KERN and related code (Mike Rapoport)

* tag 'x86-boot-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kexec: Add relocate_kernel() debugging support: Load a GDT
  x86/boot: Move the LA57 trampoline to separate source file
  x86/boot: Do not test if AC and ID eflags are changeable on x86_64
  x86/bootflag: Replace open-coded parity calculation with parity8()
  x86/bootflag: Micro-optimize sbf_write()
  x86/boot: Add missing has_cpuflag() prototype
  x86/kexec: Export e820_table_kexec[] to sysfs
  x86/boot: Change some static bootflag functions to bool
  x86/e820: Drop obsolete E820_TYPE_RESERVED_KERN and related code
  x86/boot: Split parsing of boot_params into the parse_boot_params() helper function
  x86/boot: Split kernel resources setup into the setup_kernel_resources() helper function
  x86/boot: Move setting of memblock parameters to e820__memblock_setup()
2 parents ebfb94d + b25eb5f commit b58386a

14 files changed: +266 −306 lines

arch/x86/boot/compressed/Makefile
Lines changed: 1 addition & 0 deletions

@@ -98,6 +98,7 @@ ifdef CONFIG_X86_64
 vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
 vmlinux-objs-y += $(obj)/pgtable_64.o
 vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
+vmlinux-objs-y += $(obj)/la57toggle.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o

arch/x86/boot/compressed/head_64.S
Lines changed: 0 additions & 103 deletions

@@ -483,110 +483,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	jmp	*%rax
 SYM_FUNC_END(.Lrelocated)
 
-/*
- * This is the 32-bit trampoline that will be copied over to low memory. It
- * will be called using the ordinary 64-bit calling convention from code
- * running in 64-bit mode.
- *
- * Return address is at the top of the stack (might be above 4G).
- * The first argument (EDI) contains the address of the temporary PGD level
- * page table in 32-bit addressable memory which will be programmed into
- * register CR3.
- */
-	.section ".rodata", "a", @progbits
-SYM_CODE_START(trampoline_32bit_src)
-	/*
-	 * Preserve callee save 64-bit registers on the stack: this is
-	 * necessary because the architecture does not guarantee that GPRs will
-	 * retain their full 64-bit values across a 32-bit mode switch.
-	 */
-	pushq	%r15
-	pushq	%r14
-	pushq	%r13
-	pushq	%r12
-	pushq	%rbp
-	pushq	%rbx
-
-	/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
-	movq	%rsp, %rbx
-	shrq	$32, %rbx
-
-	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
-	pushq	$__KERNEL32_CS
-	leaq	0f(%rip), %rax
-	pushq	%rax
-	lretq
-
-	/*
-	 * The 32-bit code below will do a far jump back to long mode and end
-	 * up here after reconfiguring the number of paging levels. First, the
-	 * stack pointer needs to be restored to its full 64-bit value before
-	 * the callee save register contents can be popped from the stack.
-	 */
-.Lret:
-	shlq	$32, %rbx
-	orq	%rbx, %rsp
-
-	/* Restore the preserved 64-bit registers */
-	popq	%rbx
-	popq	%rbp
-	popq	%r12
-	popq	%r13
-	popq	%r14
-	popq	%r15
-	retq
-
 	.code32
-0:
-	/* Disable paging */
-	movl	%cr0, %eax
-	btrl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-
-	/* Point CR3 to the trampoline's new top level page table */
-	movl	%edi, %cr3
-
-	/* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	btsl	$_EFER_LME, %eax
-	/* Avoid writing EFER if no change was made (for TDX guest) */
-	jc	1f
-	wrmsr
-1:
-	/* Toggle CR4.LA57 */
-	movl	%cr4, %eax
-	btcl	$X86_CR4_LA57_BIT, %eax
-	movl	%eax, %cr4
-
-	/* Enable paging again. */
-	movl	%cr0, %eax
-	btsl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-
-	/*
-	 * Return to the 64-bit calling code using LJMP rather than LRET, to
-	 * avoid the need for a 32-bit addressable stack. The destination
-	 * address will be adjusted after the template code is copied into a
-	 * 32-bit addressable buffer.
-	 */
-.Ljmp:	ljmpl	$__KERNEL_CS, $(.Lret - trampoline_32bit_src)
-SYM_CODE_END(trampoline_32bit_src)
-
-/*
- * This symbol is placed right after trampoline_32bit_src() so its address can
- * be used to infer the size of the trampoline code.
- */
-SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
-
-/*
- * The trampoline code has a size limit.
- * Make sure we fail to compile if the trampoline code grows
- * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
- */
-	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
-
-	.text
 SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
 	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:

arch/x86/boot/compressed/la57toggle.S
Lines changed: 112 additions & 0 deletions

@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/boot.h>
+#include <asm/msr.h>
+#include <asm/processor-flags.h>
+#include "pgtable.h"
+
+/*
+ * This is the 32-bit trampoline that will be copied over to low memory. It
+ * will be called using the ordinary 64-bit calling convention from code
+ * running in 64-bit mode.
+ *
+ * Return address is at the top of the stack (might be above 4G).
+ * The first argument (EDI) contains the address of the temporary PGD level
+ * page table in 32-bit addressable memory which will be programmed into
+ * register CR3.
+ */
+
+	.section ".rodata", "a", @progbits
+SYM_CODE_START(trampoline_32bit_src)
+	/*
+	 * Preserve callee save 64-bit registers on the stack: this is
+	 * necessary because the architecture does not guarantee that GPRs will
+	 * retain their full 64-bit values across a 32-bit mode switch.
+	 */
+	pushq	%r15
+	pushq	%r14
+	pushq	%r13
+	pushq	%r12
+	pushq	%rbp
+	pushq	%rbx
+
+	/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
+	movq	%rsp, %rbx
+	shrq	$32, %rbx
+
+	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+	pushq	$__KERNEL32_CS
+	leaq	0f(%rip), %rax
+	pushq	%rax
+	lretq
+
+	/*
+	 * The 32-bit code below will do a far jump back to long mode and end
+	 * up here after reconfiguring the number of paging levels. First, the
+	 * stack pointer needs to be restored to its full 64-bit value before
+	 * the callee save register contents can be popped from the stack.
+	 */
+.Lret:
+	shlq	$32, %rbx
+	orq	%rbx, %rsp
+
+	/* Restore the preserved 64-bit registers */
+	popq	%rbx
+	popq	%rbp
+	popq	%r12
+	popq	%r13
+	popq	%r14
+	popq	%r15
+	retq
+
+	.code32
+0:
+	/* Disable paging */
+	movl	%cr0, %eax
+	btrl	$X86_CR0_PG_BIT, %eax
+	movl	%eax, %cr0
+
+	/* Point CR3 to the trampoline's new top level page table */
+	movl	%edi, %cr3
+
+	/* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btsl	$_EFER_LME, %eax
+	/* Avoid writing EFER if no change was made (for TDX guest) */
+	jc	1f
+	wrmsr
+1:
+	/* Toggle CR4.LA57 */
+	movl	%cr4, %eax
+	btcl	$X86_CR4_LA57_BIT, %eax
+	movl	%eax, %cr4
+
+	/* Enable paging again. */
+	movl	%cr0, %eax
+	btsl	$X86_CR0_PG_BIT, %eax
+	movl	%eax, %cr0
+
+	/*
+	 * Return to the 64-bit calling code using LJMP rather than LRET, to
+	 * avoid the need for a 32-bit addressable stack. The destination
+	 * address will be adjusted after the template code is copied into a
+	 * 32-bit addressable buffer.
+	 */
+.Ljmp:	ljmpl	$__KERNEL_CS, $(.Lret - trampoline_32bit_src)
+SYM_CODE_END(trampoline_32bit_src)
+
+/*
+ * This symbol is placed right after trampoline_32bit_src() so its address can
+ * be used to infer the size of the trampoline code.
+ */
+SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
+
+/*
+ * The trampoline code has a size limit.
+ * Make sure we fail to compile if the trampoline code grows
+ * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
+ */
+	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
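The trampoline moves verbatim; its only position-dependent instruction is the final LJMP, whose immediate is stored as the offset .Lret - trampoline_32bit_src. As the comments note, the caller rewrites that immediate after copying the template into a 32-bit addressable buffer, locating it via trampoline_ljmp_imm_offset. A minimal C sketch of that fixup, assuming a caller shaped like the existing one in compressed/pgtable_64.c (copy_la57_trampoline and the size value are made up for illustration):

    #include <string.h>

    typedef unsigned short u16;
    typedef unsigned int   u32;

    /* Assumed placeholder; the real limit comes from pgtable.h. */
    #define TRAMPOLINE_32BIT_CODE_SIZE 0x80

    extern const char trampoline_32bit_src[];
    extern const u16  trampoline_ljmp_imm_offset;

    /* Hypothetical helper: copy the template and rebase its LJMP target. */
    static void copy_la57_trampoline(char *buf)   /* buf: 32-bit addressable */
    {
            memcpy(buf, trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE);

            /* The template stores .Lret - trampoline_32bit_src; turn it
             * into an absolute address inside the copy. */
            *(u32 *)(buf + trampoline_ljmp_imm_offset) += (unsigned long)buf;
    }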

arch/x86/boot/cpuflags.c
Lines changed: 9 additions & 17 deletions

@@ -28,40 +28,32 @@ static int has_fpu(void)
 	return fsw == 0 && (fcw & 0x103f) == 0x003f;
 }
 
+#ifdef CONFIG_X86_32
 /*
  * For building the 16-bit code we want to explicitly specify 32-bit
  * push/pop operations, rather than just saying 'pushf' or 'popf' and
- * letting the compiler choose. But this is also included from the
- * compressed/ directory where it may be 64-bit code, and thus needs
- * to be 'pushfq' or 'popfq' in that case.
+ * letting the compiler choose.
  */
-#ifdef __x86_64__
-#define PUSHF "pushfq"
-#define POPF "popfq"
-#else
-#define PUSHF "pushfl"
-#define POPF "popfl"
-#endif
-
-int has_eflag(unsigned long mask)
+bool has_eflag(unsigned long mask)
 {
 	unsigned long f0, f1;
 
-	asm volatile(PUSHF "	\n\t"
-		     PUSHF "	\n\t"
+	asm volatile("pushfl	\n\t"
+		     "pushfl	\n\t"
 		     "pop %0	\n\t"
 		     "mov %0,%1	\n\t"
 		     "xor %2,%1	\n\t"
 		     "push %1	\n\t"
-		     POPF "	\n\t"
-		     PUSHF "	\n\t"
+		     "popfl	\n\t"
+		     "pushfl	\n\t"
 		     "pop %1	\n\t"
-		     POPF
+		     "popfl"
 		     : "=&r" (f0), "=&r" (f1)
 		     : "ri" (mask));
 
 	return !!((f0^f1) & mask);
 }
+#endif
 
 void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d)
 {
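The #ifdef narrows has_eflag() to 32-bit builds, where toggling an EFLAGS bit is a genuine capability probe: EFLAGS.ID (bit 21) can only be flipped on CPUs that implement CPUID, and EFLAGS.AC only on 486 and later. On x86_64 both are architecturally guaranteed, so the header change that follows stubs the function out to return true. An illustrative sketch of the probe this function supports (cpu_has_cpuid is a hypothetical wrapper, not a symbol from this series):

    #include <stdbool.h>

    #define X86_EFLAGS_ID (1UL << 21)  /* EFLAGS.ID: toggleable iff CPUID exists */

    bool has_eflag(unsigned long mask);  /* real probe on 32-bit, stubbed on 64-bit */

    static bool cpu_has_cpuid(void)
    {
            /* On 32-bit this really flips the bit; the x86_64 stub returns true. */
            return has_eflag(X86_EFLAGS_ID);
    }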

arch/x86/boot/cpuflags.h
Lines changed: 6 additions & 1 deletion

@@ -15,8 +15,13 @@ struct cpu_features {
 extern struct cpu_features cpu;
 extern u32 cpu_vendor[3];
 
-int has_eflag(unsigned long mask);
+#ifdef CONFIG_X86_32
+bool has_eflag(unsigned long mask);
+#else
+static inline bool has_eflag(unsigned long mask) { return true; }
+#endif
 void get_cpuflags(void);
 void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d);
+bool has_cpuflag(int flag);
 
 #endif

arch/x86/include/asm/e820/api.h
Lines changed: 0 additions & 1 deletion

@@ -29,7 +29,6 @@ extern unsigned long e820__end_of_low_ram_pfn(void);
 extern u64  e820__memblock_alloc_reserved(u64 size, u64 align);
 extern void e820__memblock_setup(void);
 
-extern void e820__reserve_setup_data(void);
 extern void e820__finish_early_params(void);
 extern void e820__reserve_resources(void);
 extern void e820__reserve_resources_late(void);

arch/x86/include/asm/e820/types.h
Lines changed: 0 additions & 9 deletions

@@ -35,15 +35,6 @@ enum e820_type {
 	 * marking it with the IORES_DESC_SOFT_RESERVED designation.
 	 */
 	E820_TYPE_SOFT_RESERVED	= 0xefffffff,
-
-	/*
-	 * Reserved RAM used by the kernel itself if
-	 * CONFIG_INTEL_TXT=y is enabled, memory of this type
-	 * will be included in the S3 integrity calculation
-	 * and so should not include any memory that the BIOS
-	 * might alter over the S3 transition:
-	 */
-	E820_TYPE_RESERVED_KERN	= 128,
 };
 
 /*

arch/x86/kernel/bootflag.c
Lines changed: 8 additions & 21 deletions

@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/acpi.h>
+#include <linux/bitops.h>
 #include <asm/io.h>
 
 #include <linux/mc146818rtc.h>
@@ -20,27 +21,13 @@
 
 int sbf_port __initdata = -1;	/* set via acpi_boot_init() */
 
-static int __init parity(u8 v)
-{
-	int x = 0;
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		x ^= (v & 1);
-		v >>= 1;
-	}
-
-	return x;
-}
-
 static void __init sbf_write(u8 v)
 {
 	unsigned long flags;
 
 	if (sbf_port != -1) {
-		v &= ~SBF_PARITY;
-		if (!parity(v))
-			v |= SBF_PARITY;
+		if (!parity8(v))
+			v ^= SBF_PARITY;
 
 		printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n",
 		       sbf_port, v);
@@ -66,14 +53,14 @@ static u8 __init sbf_read(void)
 	return v;
 }
 
-static int __init sbf_value_valid(u8 v)
+static bool __init sbf_value_valid(u8 v)
 {
 	if (v & SBF_RESERVED)		/* Reserved bits */
-		return 0;
-	if (!parity(v))
-		return 0;
+		return false;
+	if (!parity8(v))
+		return false;
 
-	return 1;
+	return true;
 }
 
 static int __init sbf_init(void)
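parity8() comes from <linux/bitops.h>, hence the added include. The sbf_write() micro-optimization also replaces clear-then-set with a single XOR: when parity8(v) reports even parity, flipping SBF_PARITY makes it odd, and when parity is already odd the bit is left alone, so the written byte always carries the odd parity that sbf_value_valid() checks on read. A minimal sketch of parity8() itself, assuming the usual nibble-fold-plus-lookup implementation (returns 1 when an odd number of bits are set):

    typedef unsigned char u8;

    static inline int parity8(u8 val)
    {
            val ^= val >> 4;        /* fold the high nibble onto the low nibble */

            /* 0x6996 is a 16-entry bitmap: bit n holds the parity of nibble n */
            return (0x6996 >> (val & 0xf)) & 1;
    }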
