 #include <linux/dma-map-ops.h>
 #include <linux/crash_dump.h>
 #include <linux/hugetlb.h>
-#ifdef CONFIG_RELOCATABLE
-#include <linux/elf.h>
-#endif
 #include <linux/kfence.h>
 #include <linux/execmem.h>
 
 #include <asm/fixmap.h>
 #include <asm/io.h>
 #include <asm/kasan.h>
+#include <asm/module.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
@@ -323,6 +321,44 @@ static void __init setup_bootmem(void)
 	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 }
 
+#ifdef CONFIG_RELOCATABLE
+extern unsigned long __rela_dyn_start, __rela_dyn_end;
+
+static void __init relocate_kernel(void)
+{
+	Elf_Rela *rela = (Elf_Rela *)&__rela_dyn_start;
+	/*
+	 * This holds the offset between the linked virtual address and the
+	 * relocated virtual address.
+	 */
+	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
+	/*
+	 * This holds the offset between kernel linked virtual address and
+	 * physical address.
+	 */
+	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
+
+	for ( ; rela < (Elf_Rela *)&__rela_dyn_end; rela++) {
+		Elf_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
+		Elf_Addr relocated_addr = rela->r_addend;
+
+		if (rela->r_info != R_RISCV_RELATIVE)
+			continue;
+
+		/*
+		 * Make sure to not relocate vdso symbols like rt_sigreturn
+		 * which are linked from the address 0 in vmlinux since
+		 * vdso symbol addresses are actually used as an offset from
+		 * mm->context.vdso in VDSO_OFFSET macro.
+		 */
+		if (relocated_addr >= KERNEL_LINK_ADDR)
+			relocated_addr += reloc_offset;
+
+		*(Elf_Addr *)addr = relocated_addr;
+	}
+}
+#endif /* CONFIG_RELOCATABLE */
+
 #ifdef CONFIG_MMU
 struct pt_alloc_ops pt_ops __meminitdata;
 
@@ -823,6 +859,8 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
 	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
 	u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa);
 
+	kernel_map.page_offset = PAGE_OFFSET_L5;
+
 	if (satp_mode_cmdline == SATP_MODE_57) {
 		disable_pgtable_l5();
 	} else if (satp_mode_cmdline == SATP_MODE_48) {
@@ -893,44 +931,6 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-#ifdef CONFIG_RELOCATABLE
-extern unsigned long __rela_dyn_start, __rela_dyn_end;
-
-static void __init relocate_kernel(void)
-{
-	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
-	/*
-	 * This holds the offset between the linked virtual address and the
-	 * relocated virtual address.
-	 */
-	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
-	/*
-	 * This holds the offset between kernel linked virtual address and
-	 * physical address.
-	 */
-	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
-
-	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
-		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
-		Elf64_Addr relocated_addr = rela->r_addend;
-
-		if (rela->r_info != R_RISCV_RELATIVE)
-			continue;
-
-		/*
-		 * Make sure to not relocate vdso symbols like rt_sigreturn
-		 * which are linked from the address 0 in vmlinux since
-		 * vdso symbol addresses are actually used as an offset from
-		 * mm->context.vdso in VDSO_OFFSET macro.
-		 */
-		if (relocated_addr >= KERNEL_LINK_ADDR)
-			relocated_addr += reloc_offset;
-
-		*(Elf64_Addr *)addr = relocated_addr;
-	}
-}
-#endif /* CONFIG_RELOCATABLE */
-
 #ifdef CONFIG_XIP_KERNEL
 static void __init create_kernel_page_table(pgd_t *pgdir,
 					    __always_unused bool early)
@@ -1108,11 +1108,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
 
 #ifdef CONFIG_XIP_KERNEL
-#ifdef CONFIG_64BIT
-	kernel_map.page_offset = PAGE_OFFSET_L3;
-#else
-	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
-#endif
 	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
 	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
 
@@ -1127,7 +1122,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr
 						  + (uintptr_t)&_sdata - (uintptr_t)&_start;
 #else
-	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
 	kernel_map.phys_addr = (uintptr_t)(&_start);
 	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
 	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
@@ -1174,7 +1168,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	 * makes the kernel cross over a PUD_SIZE boundary, raise a bug
 	 * since a part of the kernel would not get mapped.
 	 */
-	BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
+	if (IS_ENABLED(CONFIG_64BIT))
+		BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
 	relocate_kernel();
 #endif
 
@@ -1378,6 +1373,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
 	dtb_early_va = (void *)dtb_pa;
 	dtb_early_pa = dtb_pa;
+
+#ifdef CONFIG_RELOCATABLE
+	kernel_map.virt_addr = (uintptr_t)_start;
+	kernel_map.phys_addr = (uintptr_t)_start;
+	relocate_kernel();
+#endif
 }
 
 static inline void setup_vm_final(void)