Commit 51b766c

SiFiveHolland authored and palmer-dabbelt committed
riscv: Support CONFIG_RELOCATABLE on NOMMU
Move relocate_kernel() out of the CONFIG_MMU block so it can be called
from the NOMMU version of setup_vm(). Set some offsets in kernel_map so
relocate_kernel() does not need to be modified. Relocatable NOMMU
kernels can be loaded to any physical memory address; they no longer
depend on CONFIG_PAGE_OFFSET.

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20241026171441.3047904-4-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
1 parent 2c0391b commit 51b766c
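For orientation, here is a standalone sketch (not the kernel's code; the real
implementation is in the arch/riscv/mm/init.c hunk below) of how a single
R_RISCV_RELATIVE entry is applied, assuming the image runs where it is loaded,
as on NOMMU where virtual and physical addresses coincide. The vdso guard in
the real function is omitted, and link_addr/load_addr are illustrative
stand-ins for KERNEL_LINK_ADDR and kernel_map.virt_addr.

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <elf.h>
#include <stdint.h>

static void apply_relative_reloc(const Elf64_Rela *rela,
				 uintptr_t link_addr, uintptr_t load_addr)
{
	/* How far the running image sits from its link-time address. */
	uintptr_t reloc_offset = load_addr - link_addr;

	/* Only R_RISCV_RELATIVE entries are patched. */
	if (ELF64_R_TYPE(rela->r_info) != R_RISCV_RELATIVE)
		return;

	/*
	 * r_offset is a link-time address; convert it to a runtime one,
	 * then store the addend shifted by the same offset there.
	 */
	*(Elf64_Addr *)(rela->r_offset + reloc_offset) =
		rela->r_addend + reloc_offset;
}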

File tree: 3 files changed, +49 −39 lines


arch/riscv/Kconfig
Lines changed: 1 addition & 1 deletion

@@ -1075,7 +1075,7 @@ config PARAVIRT_TIME_ACCOUNTING
 
 config RELOCATABLE
 	bool "Build a relocatable kernel"
-	depends on MMU && 64BIT && !XIP_KERNEL
+	depends on 64BIT && !XIP_KERNEL
 	select MODULE_SECTIONS if MODULES
 	help
 	  This builds a kernel as a Position Independent Executable (PIE),
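With the MMU dependency dropped, a 64-bit NOMMU build can now select the
option. A hypothetical .config fragment (illustrative only, not part of the
patch) might look like:

# Hypothetical .config fragment: RELOCATABLE no longer requires CONFIG_MMU,
# only 64BIT && !XIP_KERNEL.
# CONFIG_MMU is not set
CONFIG_64BIT=y
CONFIG_RELOCATABLE=y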

arch/riscv/include/asm/pgtable.h
Lines changed: 4 additions & 0 deletions

@@ -12,7 +12,11 @@
 #include <asm/pgtable-bits.h>
 
 #ifndef CONFIG_MMU
+#ifdef CONFIG_RELOCATABLE
+#define KERNEL_LINK_ADDR	UL(0)
+#else
 #define KERNEL_LINK_ADDR	_AC(CONFIG_PAGE_OFFSET, UL)
+#endif
 #define KERN_VIRT_SIZE		(UL(-1))
 #else
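A quick worked example of what the zero link address means for the offsets
computed in relocate_kernel(); the 0x80200000 load address below is made up
purely for illustration.

/*
 * Hypothetical NOMMU + RELOCATABLE example (load address chosen only
 * for illustration):
 *
 *   KERNEL_LINK_ADDR          = 0
 *   kernel_map.virt_addr      = (uintptr_t)_start = 0x80200000
 *   kernel_map.phys_addr      = (uintptr_t)_start = 0x80200000
 *
 *   reloc_offset              = virt_addr - KERNEL_LINK_ADDR =  0x80200000
 *   va_kernel_link_pa_offset  = KERNEL_LINK_ADDR - phys_addr = -0x80200000
 *
 * A relocation with r_offset X is therefore patched at address
 * X - va_kernel_link_pa_offset = X + 0x80200000, and its addend is shifted
 * by reloc_offset, so every absolute pointer ends up relative to wherever
 * the image actually sits in RAM.
 */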

arch/riscv/mm/init.c
Lines changed: 44 additions & 38 deletions

@@ -323,6 +323,44 @@ static void __init setup_bootmem(void)
 	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 }
 
+#ifdef CONFIG_RELOCATABLE
+extern unsigned long __rela_dyn_start, __rela_dyn_end;
+
+static void __init relocate_kernel(void)
+{
+	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
+	/*
+	 * This holds the offset between the linked virtual address and the
+	 * relocated virtual address.
+	 */
+	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
+	/*
+	 * This holds the offset between kernel linked virtual address and
+	 * physical address.
+	 */
+	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
+
+	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
+		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
+		Elf64_Addr relocated_addr = rela->r_addend;
+
+		if (rela->r_info != R_RISCV_RELATIVE)
+			continue;
+
+		/*
+		 * Make sure to not relocate vdso symbols like rt_sigreturn
+		 * which are linked from the address 0 in vmlinux since
+		 * vdso symbol addresses are actually used as an offset from
+		 * mm->context.vdso in VDSO_OFFSET macro.
+		 */
+		if (relocated_addr >= KERNEL_LINK_ADDR)
+			relocated_addr += reloc_offset;
+
+		*(Elf64_Addr *)addr = relocated_addr;
+	}
+}
+#endif /* CONFIG_RELOCATABLE */
+
 #ifdef CONFIG_MMU
 struct pt_alloc_ops pt_ops __meminitdata;

@@ -893,44 +931,6 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-#ifdef CONFIG_RELOCATABLE
-extern unsigned long __rela_dyn_start, __rela_dyn_end;
-
-static void __init relocate_kernel(void)
-{
-	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
-	/*
-	 * This holds the offset between the linked virtual address and the
-	 * relocated virtual address.
-	 */
-	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
-	/*
-	 * This holds the offset between kernel linked virtual address and
-	 * physical address.
-	 */
-	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
-
-	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
-		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
-		Elf64_Addr relocated_addr = rela->r_addend;
-
-		if (rela->r_info != R_RISCV_RELATIVE)
-			continue;
-
-		/*
-		 * Make sure to not relocate vdso symbols like rt_sigreturn
-		 * which are linked from the address 0 in vmlinux since
-		 * vdso symbol addresses are actually used as an offset from
-		 * mm->context.vdso in VDSO_OFFSET macro.
-		 */
-		if (relocated_addr >= KERNEL_LINK_ADDR)
-			relocated_addr += reloc_offset;
-
-		*(Elf64_Addr *)addr = relocated_addr;
-	}
-}
-#endif /* CONFIG_RELOCATABLE */
-
 #ifdef CONFIG_XIP_KERNEL
 static void __init create_kernel_page_table(pgd_t *pgdir,
 					    __always_unused bool early)
@@ -1378,6 +1378,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
 	dtb_early_va = (void *)dtb_pa;
 	dtb_early_pa = dtb_pa;
+
+#ifdef CONFIG_RELOCATABLE
+	kernel_map.virt_addr = (uintptr_t)_start;
+	kernel_map.phys_addr = (uintptr_t)_start;
+	relocate_kernel();
+#endif
 }
 
 static inline void setup_vm_final(void)
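As a concrete illustration of why the NOMMU setup_vm() runs relocate_kernel()
this early (hypothetical variables, not from the patch): any statically
initialized pointer in a PIE image needs an R_RISCV_RELATIVE fixup, so it only
holds a usable address once the walk above has shifted it.

/* Hypothetical example, not part of the patch. */
static int answer = 42;
static int *answer_ptr = &answer;	/* emitted as an R_RISCV_RELATIVE entry */

/*
 * Before relocate_kernel(): answer_ptr still holds its link-time value,
 * computed against KERNEL_LINK_ADDR (0 for a relocatable NOMMU kernel),
 * so dereferencing it would hit the wrong memory.
 *
 * After relocate_kernel(): the addend has been shifted by
 * kernel_map.virt_addr - KERNEL_LINK_ADDR, so answer_ptr points at
 * 'answer' wherever the kernel was actually loaded.
 */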
