Skip to content

Commit 0f506c7

Browse files
committed
Merge tag 'riscv-for-linus-6.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Palmer Dabbelt:

 - A fix to avoid ISA-disallowed privilege mappings that can result from
   WRITE+EXEC mmap requests from userspace.

 - A fix for kfence to handle huge pages.

 - A fix to avoid converting misaligned VAs to huge pages.

 - ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE has been selected so kprobes
   can understand user pointers.

* tag 'riscv-for-linus-6.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: fix kprobe __user string arg print fault issue
  riscv: Check the virtual alignment before choosing a map size
  riscv: Fix kfence now that the linear mapping can be backed by PUD/P4D/PGD
  riscv: mm: Ensure prot of VM_WRITE and VM_EXEC must be readable
2 parents 87aceaa + 99a670b commit 0f506c7

File tree

4 files changed

+39
-46
lines changed

4 files changed

+39
-46
lines changed

arch/riscv/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ config RISCV
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_MMIOWB
+	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PMEM_API
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_DIRECT_MAP if MMU

arch/riscv/include/asm/kfence.h

Lines changed: 0 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -8,41 +8,8 @@
 #include <asm-generic/pgalloc.h>
 #include <asm/pgtable.h>
 
-static inline int split_pmd_page(unsigned long addr)
-{
-	int i;
-	unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
-	pmd_t *pmd = pmd_off_k(addr);
-	pte_t *pte = pte_alloc_one_kernel(&init_mm);
-
-	if (!pte)
-		return -ENOMEM;
-
-	for (i = 0; i < PTRS_PER_PTE; i++)
-		set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
-	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
-
-	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
-	return 0;
-}
-
 static inline bool arch_kfence_init_pool(void)
 {
-	int ret;
-	unsigned long addr;
-	pmd_t *pmd;
-
-	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
-	     addr += PAGE_SIZE) {
-		pmd = pmd_off_k(addr);
-
-		if (pmd_leaf(*pmd)) {
-			ret = split_pmd_page(addr);
-			if (ret)
-				return false;
-		}
-	}
-
 	return true;
 }

arch/riscv/include/asm/pgtable.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -165,8 +165,7 @@ extern struct pt_alloc_ops pt_ops __initdata;
 				 _PAGE_EXEC | _PAGE_WRITE)
 
 #define PAGE_COPY		PAGE_READ
-#define PAGE_COPY_EXEC		PAGE_EXEC
-#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
+#define PAGE_COPY_EXEC		PAGE_READ_EXEC
 #define PAGE_SHARED		PAGE_WRITE
 #define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

arch/riscv/mm/init.c

Lines changed: 37 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
 #ifdef CONFIG_RELOCATABLE
 #include <linux/elf.h>
 #endif
+#include <linux/kfence.h>
 
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
@@ -293,7 +294,7 @@ static const pgprot_t protection_map[16] = {
 	[VM_EXEC]			= PAGE_EXEC,
 	[VM_EXEC | VM_READ]		= PAGE_READ_EXEC,
 	[VM_EXEC | VM_WRITE]		= PAGE_COPY_EXEC,
-	[VM_EXEC | VM_WRITE | VM_READ]	= PAGE_COPY_READ_EXEC,
+	[VM_EXEC | VM_WRITE | VM_READ]	= PAGE_COPY_EXEC,
 	[VM_SHARED]			= PAGE_NONE,
 	[VM_SHARED | VM_READ]		= PAGE_READ,
 	[VM_SHARED | VM_WRITE]		= PAGE_SHARED,
@@ -659,18 +660,19 @@ void __init create_pgd_mapping(pgd_t *pgdp,
 	create_pgd_next_mapping(nextp, va, pa, sz, prot);
 }
 
-static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
+static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
+				      phys_addr_t size)
 {
-	if (!(base & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
+	if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
 		return PGDIR_SIZE;
 
-	if (!(base & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+	if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
 		return P4D_SIZE;
 
-	if (!(base & (PUD_SIZE - 1)) && size >= PUD_SIZE)
+	if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
 		return PUD_SIZE;
 
-	if (!(base & (PMD_SIZE - 1)) && size >= PMD_SIZE)
+	if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
 		return PMD_SIZE;
 
 	return PAGE_SIZE;
@@ -1167,14 +1169,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 }
 
 static void __init create_linear_mapping_range(phys_addr_t start,
-					       phys_addr_t end)
+					       phys_addr_t end,
+					       uintptr_t fixed_map_size)
 {
 	phys_addr_t pa;
 	uintptr_t va, map_size;
 
 	for (pa = start; pa < end; pa += map_size) {
 		va = (uintptr_t)__va(pa);
-		map_size = best_map_size(pa, end - pa);
+		map_size = fixed_map_size ? fixed_map_size :
+					    best_map_size(pa, va, end - pa);
 
 		create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
 				   pgprot_from_va(va));
@@ -1184,6 +1188,7 @@ static void __init create_linear_mapping_range(phys_addr_t start,
 static void __init create_linear_mapping_page_table(void)
 {
 	phys_addr_t start, end;
+	phys_addr_t kfence_pool __maybe_unused;
 	u64 i;
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
@@ -1197,6 +1202,19 @@ static void __init create_linear_mapping_page_table(void)
 	memblock_mark_nomap(krodata_start, krodata_size);
 #endif
 
+#ifdef CONFIG_KFENCE
+	/*
+	 * kfence pool must be backed by PAGE_SIZE mappings, so allocate it
+	 * before we setup the linear mapping so that we avoid using hugepages
+	 * for this region.
+	 */
+	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	BUG_ON(!kfence_pool);
+
+	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	__kfence_pool = __va(kfence_pool);
+#endif
+
 	/* Map all memory banks in the linear mapping */
 	for_each_mem_range(i, &start, &end) {
 		if (start >= end)
@@ -1207,17 +1225,25 @@ static void __init create_linear_mapping_page_table(void)
 		if (end >= __pa(PAGE_OFFSET) + memory_limit)
 			end = __pa(PAGE_OFFSET) + memory_limit;
 
-		create_linear_mapping_range(start, end);
+		create_linear_mapping_range(start, end, 0);
 	}
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-	create_linear_mapping_range(ktext_start, ktext_start + ktext_size);
+	create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
 	create_linear_mapping_range(krodata_start,
-				    krodata_start + krodata_size);
+				    krodata_start + krodata_size, 0);
 
 	memblock_clear_nomap(ktext_start, ktext_size);
 	memblock_clear_nomap(krodata_start, krodata_size);
 #endif
+
+#ifdef CONFIG_KFENCE
+	create_linear_mapping_range(kfence_pool,
+				    kfence_pool + KFENCE_POOL_SIZE,
+				    PAGE_SIZE);
+
+	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+#endif
 }
 
 static void __init setup_vm_final(void)

0 commit comments

Comments
 (0)