Commit 5aa4ac6

zhangqingmy authored and chenhuacai committed
LoongArch: Add KASAN (Kernel Address Sanitizer) support
1/8 of kernel addresses is reserved for shadow memory. But on LoongArch there are a lot of holes between the different segments, and the valid address space (256T available) is insufficient to map all of these segments to KASAN shadow memory with the common formula provided by the KASAN core, namely

    (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET

So LoongArch uses an arch-specific mapping formula: the segments are mapped individually, and only a limited length of each segment is mapped to shadow.

At the early boot stage the whole shadow region is populated with just one physical page (kasan_early_shadow_page). Later, this page is reused as a read-only zero shadow for memory that KASAN does not currently track. After the physical memory has been mapped, pages for the shadow memory are allocated and mapped.

Functions like memset()/memcpy()/memmove() do a lot of memory accesses. If a bad pointer is passed to one of these functions, it is important that it be caught, and the compiler's instrumentation cannot do this since they are written in assembly. KASAN therefore replaces these memory functions with manually instrumented variants: the original functions are declared as weak symbols so that the strong definitions in mm/kasan/kasan.c can replace them, and they are also given aliases with a '__' prefix so the non-instrumented variant can be called when needed.

Signed-off-by: Qing Zhang <zhangqing@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
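To make the contrast concrete, here is a minimal sketch (not part of the patch; names and constants are illustrative only) of the generic one-formula mapping versus a per-segment mapping of the kind described above:

/* Illustrative only: generic KASAN vs. per-segment shadow mapping. */
#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT 3	/* 1 shadow byte covers 8 bytes */

/* Generic KASAN: one linear formula for the entire address space. */
static uintptr_t generic_mem_to_shadow(uintptr_t addr, uintptr_t shadow_offset)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + shadow_offset;
}

/*
 * Per-segment mapping: strip the segment bits first, then scale only the
 * in-segment offset and add that segment's own shadow base. Holes between
 * segments therefore consume no shadow memory at all.
 */
static uintptr_t segment_mem_to_shadow(uintptr_t addr, uintptr_t segment_mask,
				       uintptr_t segment_shadow_base)
{
	return ((addr & segment_mask) >> KASAN_SHADOW_SCALE_SHIFT) + segment_shadow_base;
}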
1 parent 9fbcc07 commit 5aa4ac6

File tree

17 files changed: +455 -13 lines changed

Documentation/dev-tools/kasan.rst

Lines changed: 2 additions & 2 deletions

@@ -41,8 +41,8 @@ Support
 Architectures
 ~~~~~~~~~~~~~

-Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, and
-xtensa, and the tag-based KASAN modes are supported only on arm64.
+Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, xtensa,
+and loongarch, and the tag-based KASAN modes are supported only on arm64.

 Compilers
 ~~~~~~~~~

Documentation/features/debug/KASAN/arch-support.txt

Lines changed: 1 addition & 1 deletion

@@ -13,7 +13,7 @@
 | csky: | TODO |
 | hexagon: | TODO |
 | ia64: | TODO |
-| loongarch: | TODO |
+| loongarch: | ok |
 | m68k: | TODO |
 | microblaze: | TODO |
 | mips: | TODO |

Documentation/translations/zh_CN/dev-tools/kasan.rst

Lines changed: 1 addition & 1 deletion

@@ -42,7 +42,7 @@ KASAN有三种模式:
 体系架构
 ~~~~~~~~

-在x86_64、arm、arm64、powerpc、riscv、s390和xtensa上支持通用KASAN
+在x86_64、arm、arm64、powerpc、riscv、s390、xtensa和loongarch上支持通用KASAN
 而基于标签的KASAN模式只在arm64上支持。

 编译器

arch/loongarch/Kconfig

Lines changed: 7 additions & 0 deletions

@@ -8,6 +8,7 @@ config LOONGARCH
 	select ACPI_PPTT if ACPI
 	select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
 	select ARCH_BINFMT_ELF_STATE
+	select ARCH_DISABLE_KASAN_INLINE
 	select ARCH_ENABLE_MEMORY_HOTPLUG
 	select ARCH_ENABLE_MEMORY_HOTREMOVE
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
@@ -92,6 +93,7 @@ config LOONGARCH
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
+	select HAVE_ARCH_KASAN
 	select HAVE_ARCH_KFENCE
 	select HAVE_ARCH_KGDB if PERF_EVENTS
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
@@ -669,6 +671,11 @@ config ARCH_MMAP_RND_BITS_MAX
 config ARCH_SUPPORTS_UPROBES
 	def_bool y

+config KASAN_SHADOW_OFFSET
+	hex
+	default 0x0
+	depends on KASAN
+
 menu "Power management options"

 config ARCH_SUSPEND_POSSIBLE
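A note on these two options (my reading, not stated in the patch): since the arch's kasan_mem_to_shadow() is segment-based, the generic linear offset is unused and can simply be 0, and inline instrumentation must be disabled because the compiler would hard-code the generic formula instead of calling the arch helper:

/*
 * Illustration (not from the patch) of why ARCH_DISABLE_KASAN_INLINE is
 * selected. With inline instrumentation the compiler emits the generic
 * linear check directly at every access:
 *
 *	shadow = (addr >> 3) + KASAN_SHADOW_OFFSET;	/* wrong on LoongArch */
 *
 * With outline instrumentation it emits a call such as __asan_load8(addr),
 * which the kernel's KASAN code resolves through the arch-specific
 * kasan_mem_to_shadow() defined in asm/kasan.h below.
 */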

arch/loongarch/Makefile

Lines changed: 3 additions & 0 deletions

@@ -84,7 +84,10 @@ LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
 endif

 cflags-y += $(call cc-option, -mno-check-zero-division)
+
+ifndef CONFIG_KASAN
 cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
+endif

 load-y = 0x9000000000200000
 bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)

arch/loongarch/include/asm/kasan.h

Lines changed: 126 additions & 0 deletions

@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/mmzone.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#define __HAVE_ARCH_SHADOW_MAP
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+#define XRANGE_SHIFT (48)
+
+/* Valid address length */
+#define XRANGE_SHADOW_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
+/* Used for taking out the valid address */
+#define XRANGE_SHADOW_MASK	GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
+/* One segment whole address space size */
+#define XRANGE_SIZE		(XRANGE_SHADOW_MASK + 1)
+
+/* 64-bit segment value. */
+#define XKPRANGE_UC_SEG		(0x8000)
+#define XKPRANGE_CC_SEG		(0x9000)
+#define XKVRANGE_VC_SEG		(0xffff)
+
+/* Cached */
+#define XKPRANGE_CC_START		CACHE_BASE
+#define XKPRANGE_CC_SIZE		XRANGE_SIZE
+#define XKPRANGE_CC_KASAN_OFFSET	(0)
+#define XKPRANGE_CC_SHADOW_SIZE		(XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_CC_SHADOW_END		(XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE)
+
+/* UnCached */
+#define XKPRANGE_UC_START		UNCACHE_BASE
+#define XKPRANGE_UC_SIZE		XRANGE_SIZE
+#define XKPRANGE_UC_KASAN_OFFSET	XKPRANGE_CC_SHADOW_END
+#define XKPRANGE_UC_SHADOW_SIZE		(XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_UC_SHADOW_END		(XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
+
+/* VMALLOC (Cached or UnCached) */
+#define XKVRANGE_VC_START		MODULES_VADDR
+#define XKVRANGE_VC_SIZE		round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
+#define XKVRANGE_VC_KASAN_OFFSET	XKPRANGE_UC_SHADOW_END
+#define XKVRANGE_VC_SHADOW_SIZE		(XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKVRANGE_VC_SHADOW_END		(XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
+
+/* KAsan shadow memory start right after vmalloc. */
+#define KASAN_SHADOW_START		round_up(KFENCE_AREA_END, PGDIR_SIZE)
+#define KASAN_SHADOW_SIZE		(XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
+#define KASAN_SHADOW_END		round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+
+#define XKPRANGE_CC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
+#define XKPRANGE_UC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
+#define XKVRANGE_VC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
+
+extern bool kasan_early_stage;
+extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+
+#define kasan_arch_is_ready kasan_arch_is_ready
+static __always_inline bool kasan_arch_is_ready(void)
+{
+	return !kasan_early_stage;
+}
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+	if (!kasan_arch_is_ready()) {
+		return (void *)(kasan_early_shadow_page);
+	} else {
+		unsigned long maddr = (unsigned long)addr;
+		unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+		unsigned long offset = 0;
+
+		maddr &= XRANGE_SHADOW_MASK;
+		switch (xrange) {
+		case XKPRANGE_CC_SEG:
+			offset = XKPRANGE_CC_SHADOW_OFFSET;
+			break;
+		case XKPRANGE_UC_SEG:
+			offset = XKPRANGE_UC_SHADOW_OFFSET;
+			break;
+		case XKVRANGE_VC_SEG:
+			offset = XKVRANGE_VC_SHADOW_OFFSET;
+			break;
+		default:
+			WARN_ON(1);
+			return NULL;
+		}
+
+		return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+	}
+}
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+	unsigned long addr = (unsigned long)shadow_addr;
+
+	if (unlikely(addr > KASAN_SHADOW_END) ||
+		unlikely(addr < KASAN_SHADOW_START)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+	else {
+		WARN_ON(1);
+		return NULL;
+	}
+}
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#endif
+#endif
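As a usage sketch (my own example, not part of the patch; it only holds once kasan_init() has cleared kasan_early_stage):

/* Hypothetical round trip through the two helpers above. */
static void example_round_trip(void)
{
	void *p = (void *)0x9000000012345678UL;	/* cached XKPRANGE address */
	char *shadow = kasan_mem_to_shadow(p);
	const void *q;

	/*
	 * xrange = 0x9000 -> XKPRANGE_CC_SEG, maddr = 0x12345678, so
	 * shadow = (0x12345678 >> 3) + XKPRANGE_CC_SHADOW_OFFSET.
	 */
	q = kasan_shadow_to_mem(shadow);	/* 0x...78 is 8-byte aligned, so q == p */
}

Before kasan_init() runs, kasan_mem_to_shadow() instead returns kasan_early_shadow_page for every address, so all early accesses see zero shadow and pass.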

arch/loongarch/include/asm/pgtable.h

Lines changed: 7 additions & 0 deletions

@@ -89,9 +89,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #endif

 #define VMALLOC_START MODULES_END
+
+#ifndef CONFIG_KASAN
 #define VMALLOC_END	\
 	(vm_map_base +	\
 	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
+#else
+#define VMALLOC_END	\
+	(vm_map_base +	\
+	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
+#endif

 #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
 #define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
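The halving is what frees room for the shadow region; as a back-of-the-envelope check (assuming cpu_vabits == 48, i.e. a 256TB window — my numbers, not the patch's):

/*
 * Without KASAN: VMALLOC_END may extend toward 1UL << 48 = 256TB.
 * With KASAN:    it is capped at (1UL << 48) / 2 = 128TB, and since each
 * mapped segment needs 1/8 of its size in shadow (KASAN_SHADOW_SCALE_SHIFT
 * = 3), the reserved upper half comfortably holds the shadow sub-regions
 * that asm/kasan.h stacks after KFENCE_AREA_END.
 */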

arch/loongarch/include/asm/string.h

Lines changed: 20 additions & 0 deletions

@@ -7,11 +7,31 @@

 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);

 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);

 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use not instrumented version of mem* functions.
+ */
+
+#define memset(s, c, n) __memset(s, c, n)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif

 #endif /* _ASM_STRING_H */
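The strong, instrumented definitions that override these weak symbols live in the generic KASAN code; a minimal sketch of that pattern (simplified from mm/kasan, so treat the exact helper name kasan_check_range() as an assumption):

/* Sketch of the instrumented strong definition overriding the weak memset(). */
#undef memset
void *memset(void *addr, int c, size_t len)
{
	/* Validate the whole destination range before writing it. */
	if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);	/* uninstrumented assembly variant */
}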

arch/loongarch/kernel/Makefile

Lines changed: 6 additions & 0 deletions

@@ -34,6 +34,12 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE)
 endif

+KASAN_SANITIZE_efi.o := n
+KASAN_SANITIZE_cpu-probe.o := n
+KASAN_SANITIZE_traps.o := n
+KASAN_SANITIZE_smp.o := n
+KASAN_SANITIZE_vdso.o := n
+
 obj-$(CONFIG_MODULES) += module.o module-sections.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o

arch/loongarch/kernel/head.S

Lines changed: 4 additions & 0 deletions

@@ -104,6 +104,10 @@ SYM_CODE_START(kernel_entry)	# kernel entry point

 #endif /* CONFIG_RELOCATABLE */

+#ifdef CONFIG_KASAN
+	bl	kasan_early_init
+#endif
+
 	bl	start_kernel
 	ASM_BUG()
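Per the commit message, at this point the whole shadow region is still backed by the single kasan_early_shadow_page; one plausible shape for the early hook (a sketch under that assumption, not the patch's actual mm code) is simply sanity-checking the layout before any C code touches shadow:

asmlinkage void __init kasan_early_init(void)
{
	/* The mapping scheme relies on PGDIR-aligned shadow bounds. */
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
}

Real shadow pages are then allocated later, after physical memory is mapped, when kasan_init() clears kasan_early_stage.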
