@@ -323,6 +323,44 @@ static void __init setup_bootmem(void)
 	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 }
 
+#ifdef CONFIG_RELOCATABLE
+extern unsigned long __rela_dyn_start, __rela_dyn_end;
+
+static void __init relocate_kernel(void)
+{
+	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
+	/*
+	 * This holds the offset between the linked virtual address and the
+	 * relocated virtual address.
+	 */
+	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
+	/*
+	 * This holds the offset between kernel linked virtual address and
+	 * physical address.
+	 */
+	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
+
+	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
+		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
+		Elf64_Addr relocated_addr = rela->r_addend;
+
+		if (rela->r_info != R_RISCV_RELATIVE)
+			continue;
+
+		/*
+		 * Make sure to not relocate vdso symbols like rt_sigreturn
+		 * which are linked from the address 0 in vmlinux since
+		 * vdso symbol addresses are actually used as an offset from
+		 * mm->context.vdso in VDSO_OFFSET macro.
+		 */
+		if (relocated_addr >= KERNEL_LINK_ADDR)
+			relocated_addr += reloc_offset;
+
+		*(Elf64_Addr *)addr = relocated_addr;
+	}
+}
+#endif /* CONFIG_RELOCATABLE */
+
 #ifdef CONFIG_MMU
 struct pt_alloc_ops pt_ops __meminitdata;
 
@@ -893,44 +931,6 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-#ifdef CONFIG_RELOCATABLE
-extern unsigned long __rela_dyn_start, __rela_dyn_end;
-
-static void __init relocate_kernel(void)
-{
-	Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
-	/*
-	 * This holds the offset between the linked virtual address and the
-	 * relocated virtual address.
-	 */
-	uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
-	/*
-	 * This holds the offset between kernel linked virtual address and
-	 * physical address.
-	 */
-	uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
-
-	for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
-		Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
-		Elf64_Addr relocated_addr = rela->r_addend;
-
-		if (rela->r_info != R_RISCV_RELATIVE)
-			continue;
-
-		/*
-		 * Make sure to not relocate vdso symbols like rt_sigreturn
-		 * which are linked from the address 0 in vmlinux since
-		 * vdso symbol addresses are actually used as an offset from
-		 * mm->context.vdso in VDSO_OFFSET macro.
-		 */
-		if (relocated_addr >= KERNEL_LINK_ADDR)
-			relocated_addr += reloc_offset;
-
-		*(Elf64_Addr *)addr = relocated_addr;
-	}
-}
-#endif /* CONFIG_RELOCATABLE */
-
 #ifdef CONFIG_XIP_KERNEL
 static void __init create_kernel_page_table(pgd_t *pgdir,
 					    __always_unused bool early)
@@ -1378,6 +1378,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
 	dtb_early_va = (void *)dtb_pa;
 	dtb_early_pa = dtb_pa;
+
+#ifdef CONFIG_RELOCATABLE
+	kernel_map.virt_addr = (uintptr_t)_start;
+	kernel_map.phys_addr = (uintptr_t)_start;
+	relocate_kernel();
+#endif
 }
 
 static inline void setup_vm_final(void)
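The relocation loop moved above setup_bootmem() boils down to walking the .rela.dyn table and, for every R_RISCV_RELATIVE entry, storing r_addend plus the load-time offset at the address named by r_offset. The following user-space sketch illustrates only that fixup step; the relocation table, the reloc_offset value, and the target slots are invented for illustration and are not taken from this patch.

#include <elf.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stand-in for two words in the relocated image that need fixups. */
	uint64_t slot[2] = { 0, 0 };

	/* Hypothetical delta between link-time and load-time addresses. */
	uintptr_t reloc_offset = 0x1000;

	/*
	 * Hypothetical relocation table: r_offset says where to write the
	 * fixup, r_addend holds the link-time value to be adjusted.
	 */
	Elf64_Rela rela[] = {
		{ .r_offset = (Elf64_Addr)&slot[0],
		  .r_info   = R_RISCV_RELATIVE,
		  .r_addend = 0x80000000 },
		{ .r_offset = (Elf64_Addr)&slot[1],
		  .r_info   = R_RISCV_RELATIVE,
		  .r_addend = 0x80000008 },
	};

	for (unsigned int i = 0; i < sizeof(rela) / sizeof(rela[0]); i++) {
		/* Only R_RISCV_RELATIVE entries are patched. */
		if (ELF64_R_TYPE(rela[i].r_info) != R_RISCV_RELATIVE)
			continue;

		/* Apply the fixup: link-time value plus the relocation offset. */
		*(Elf64_Addr *)rela[i].r_offset = rela[i].r_addend + reloc_offset;
	}

	printf("slot[0] = %#lx, slot[1] = %#lx\n",
	       (unsigned long)slot[0], (unsigned long)slot[1]);
	return 0;
}

The kernel loop compares r_info directly against R_RISCV_RELATIVE rather than extracting the type with ELF64_R_TYPE(); that works because relative relocations carry no symbol, so the symbol index encoded in r_info is zero.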