
Commit 6ad3df5

lienze authored and chenhuacai committed
LoongArch: Add KFENCE (Kernel Electric-Fence) support
The LoongArch architecture is quite different from other architectures in this respect: when the KFENCE pool is allocated, it is mapped through the direct mapping configuration window [1] by default. This makes it impossible to use the page-table-mapped mode that KFENCE requires, so the pool has to be remapped into an appropriate page-table-mapped region.

This patch adds the architecture-specific implementation details for KFENCE. In particular, it implements the required interface in <asm/kfence.h>.

Tested this patch by running the KFENCE test cases; all of them passed.

[1] https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#virtual-address-space-and-address-translation-mode

Signed-off-by: Enze Li <lienze@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent: 95bb5b6 · commit: 6ad3df5

File tree

4 files changed (+86, -9 lines)

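The commit message above says the KFENCE test cases were run and passed. KFENCE is sample-based, so only an occasional kmalloc() is served from its pool, and the in-tree KUnit suite (CONFIG_KFENCE_KUNIT_TEST) is the reliable way to exercise it. Purely as a hypothetical illustration (none of this code is part of the patch, and all names here are made up), a throwaway module like the following shows the kind of out-of-bounds access KFENCE is meant to report once this support is in place:

#include <linux/module.h>
#include <linux/slab.h>

static int __init kfence_oob_demo_init(void)
{
	char *buf = kmalloc(32, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/*
	 * If this allocation happened to be served from the KFENCE pool,
	 * the read below either faults on the neighbouring guard page
	 * (when the object sits at the end of its data page) or corrupts
	 * a canary that KFENCE reports at kfree() time.
	 */
	(void)*(volatile char *)(buf + 32);

	kfree(buf);
	return 0;
}

static void __exit kfence_oob_demo_exit(void)
{
}

module_init(kfence_oob_demo_init);
module_exit(kfence_oob_demo_exit);
MODULE_LICENSE("GPL");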

arch/loongarch/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -92,6 +92,7 @@ config LOONGARCH
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
+	select HAVE_ARCH_KFENCE
 	select HAVE_ARCH_KGDB if PERF_EVENTS
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_SECCOMP_FILTER
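With HAVE_ARCH_KFENCE selected, the generic KFENCE options become available on LoongArch. As a hedged example (the values shown are the upstream defaults, not something this commit sets, and the KUnit test additionally requires CONFIG_KUNIT), a .config fragment for trying the feature out might look like:

CONFIG_KFENCE=y
CONFIG_KFENCE_SAMPLE_INTERVAL=100
CONFIG_KFENCE_NUM_OBJECTS=255
CONFIG_KFENCE_KUNIT_TEST=m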

arch/loongarch/include/asm/kfence.h

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KFENCE support for LoongArch.
 *
 * Author: Enze Li <lienze@kylinos.cn>
 * Copyright (C) 2022-2023 KylinSoft Corporation.
 */

#ifndef _ASM_LOONGARCH_KFENCE_H
#define _ASM_LOONGARCH_KFENCE_H

#include <linux/kfence.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

static inline bool arch_kfence_init_pool(void)
{
	int err;
	char *kfence_pool = __kfence_pool;
	struct vm_struct *area;

	area = __get_vm_area_caller(KFENCE_POOL_SIZE, VM_IOREMAP,
				    KFENCE_AREA_START, KFENCE_AREA_END,
				    __builtin_return_address(0));
	if (!area)
		return false;

	__kfence_pool = (char *)area->addr;
	err = ioremap_page_range((unsigned long)__kfence_pool,
				 (unsigned long)__kfence_pool + KFENCE_POOL_SIZE,
				 virt_to_phys((void *)kfence_pool), PAGE_KERNEL);
	if (err) {
		free_vm_area(area);
		__kfence_pool = kfence_pool;
		return false;
	}

	return true;
}

/* Protect the given page and flush TLB. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *pte = virt_to_kpte(addr);

	if (WARN_ON(!pte) || pte_none(*pte))
		return false;

	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
	else
		set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));

	preempt_disable();
	local_flush_tlb_one(addr);
	preempt_enable();

	return true;
}

#endif /* _ASM_LOONGARCH_KFENCE_H */
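In short, arch_kfence_init_pool() takes the already-allocated __kfence_pool (which sits in the direct mapping window), reserves a hole in the dedicated KFENCE_AREA_START..KFENCE_AREA_END window, and points that hole's PTEs at the pool's physical pages, so that kfence_protect_page() can later invalidate individual pages by clearing _PAGE_VALID/_PAGE_PRESENT and flushing the TLB entry. The same remapping pattern, stripped of the KFENCE specifics, is sketched below; remap_through_ptes() and its parameters are hypothetical names used only for illustration, and the size is assumed to be page-aligned:

#include <linux/io.h>
#include <linux/vmalloc.h>

static void *remap_through_ptes(void *linear_buf, unsigned long size)
{
	struct vm_struct *area;

	/* Reserve an unused, page-table-mapped hole in the vmalloc space. */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	/* Point the hole's PTEs at the buffer's physical pages. */
	if (ioremap_page_range((unsigned long)area->addr,
			       (unsigned long)area->addr + size,
			       virt_to_phys(linear_buf), PAGE_KERNEL)) {
		free_vm_area(area);
		return NULL;
	}

	/*
	 * Accesses through the returned alias go through PTEs, so single
	 * pages can later be made invalid, something the direct mapping
	 * window does not allow.
	 */
	return area->addr;
}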

arch/loongarch/include/asm/pgtable.h

Lines changed: 10 additions & 1 deletion
@@ -82,14 +82,23 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
 #define MODULES_END	(MODULES_VADDR + SZ_256M)
 
+#ifdef CONFIG_KFENCE
+#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
+#else
+#define KFENCE_AREA_SIZE	0
+#endif
+
 #define VMALLOC_START	MODULES_END
 #define VMALLOC_END	\
 	(vm_map_base +	\
-	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)
+	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
 
 #define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
 #define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
 
+#define KFENCE_AREA_START	(VMEMMAP_END + 1)
+#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #ifndef __PAGETABLE_PMD_FOLDED
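For a sense of scale (assuming the upstream defaults of CONFIG_KFENCE_NUM_OBJECTS=255 and LoongArch's default 16 KB page size, neither of which this commit sets): KFENCE_AREA_SIZE = ((255 + 1) * 2 + 2) * 16 KB = 514 * 16 KB, a bit over 8 MB, i.e. two pages per object (a data page plus a guard page) plus two additional pages. VMALLOC_END is moved down by exactly that amount so the window [KFENCE_AREA_START, KFENCE_AREA_END] can sit directly after the vmemmap region without overlapping anything else.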

arch/loongarch/mm/fault.c

Lines changed: 14 additions & 8 deletions
@@ -23,21 +23,26 @@
 #include <linux/kprobes.h>
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
+#include <linux/kfence.h>
 
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
 #include <asm/ptrace.h>
 
 int show_unhandled_signals = 1;
 
-static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
+static void __kprobes no_context(struct pt_regs *regs,
+			unsigned long write, unsigned long address)
 {
 	const int field = sizeof(unsigned long) * 2;
 
 	/* Are we prepared to handle this kernel fault? */
 	if (fixup_exception(regs))
 		return;
 
+	if (kfence_handle_page_fault(address, write, regs))
+		return;
+
 	/*
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
@@ -51,14 +56,15 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
 	die("Oops", regs);
 }
 
-static void __kprobes do_out_of_memory(struct pt_regs *regs, unsigned long address)
+static void __kprobes do_out_of_memory(struct pt_regs *regs,
+			unsigned long write, unsigned long address)
 {
 	/*
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
 	 */
 	if (!user_mode(regs)) {
-		no_context(regs, address);
+		no_context(regs, write, address);
 		return;
 	}
 	pagefault_out_of_memory();
@@ -69,7 +75,7 @@ static void __kprobes do_sigbus(struct pt_regs *regs,
 {
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs)) {
-		no_context(regs, address);
+		no_context(regs, write, address);
 		return;
 	}
 
@@ -90,7 +96,7 @@ static void __kprobes do_sigsegv(struct pt_regs *regs,
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs)) {
-		no_context(regs, address);
+		no_context(regs, write, address);
 		return;
 	}
 
@@ -149,7 +155,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 	 */
 	if (address & __UA_LIMIT) {
 		if (!user_mode(regs))
-			no_context(regs, address);
+			no_context(regs, write, address);
 		else
 			do_sigsegv(regs, write, address, si_code);
 		return;
@@ -211,7 +217,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
-			no_context(regs, address);
+			no_context(regs, write, address);
 		return;
 	}
 
@@ -232,7 +238,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mmap_read_unlock(mm);
 		if (fault & VM_FAULT_OOM) {
-			do_out_of_memory(regs, address);
+			do_out_of_memory(regs, write, address);
 			return;
 		} else if (fault & VM_FAULT_SIGSEGV) {
 			do_sigsegv(regs, write, address, si_code);
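The write argument is threaded through no_context() and do_out_of_memory() so that kfence_handle_page_fault() can distinguish reads from writes in its report; a nonzero write simply converts to the bool the generic helper expects. As a hypothetical fragment (not from the patch; it could be dropped into a throwaway test module like the one sketched near the top), this is the kind of use-after-free the new path turns into a KFENCE report instead of a kernel Oops:

#include <linux/slab.h>

static void kfence_uaf_demo(void)
{
	char *obj = kmalloc(64, GFP_KERNEL);

	if (!obj)
		return;

	kfree(obj);

	/*
	 * If obj came from the KFENCE pool, kfence_protect_page() cleared
	 * _PAGE_VALID/_PAGE_PRESENT on its page when it was freed, so this
	 * read faults in kernel mode, reaches no_context(), and is claimed
	 * by kfence_handle_page_fault() as a use-after-free.
	 */
	(void)*(volatile char *)obj;
}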
