Skip to content

Commit 2e17430

Browse files
Samuel Holland (SiFive) authored and Palmer Dabbelt committed
riscv: Add support for the tagged address ABI
When pointer masking is enabled for userspace, the kernel can accept tagged pointers as arguments to some system calls. Allow this by untagging the pointers in access_ok() and the uaccess routines. The uaccess routines must peform untagging in software because U-mode and S-mode have entirely separate pointer masking configurations. In fact, hardware may not even implement pointer masking for S-mode. Since the number of tag bits is variable, untagged_addr_remote() needs to know what PMLEN to use for the remote mm. Therefore, the pointer masking mode must be the same for all threads sharing an mm. Enforce this with a lock flag in the mm context, as x86 does for LAM. The flag gets reset in init_new_context() during fork(), as the new mm is no longer multithreaded. Reviewed-by: Charlie Jenkins <charlie@rivosinc.com> Tested-by: Charlie Jenkins <charlie@rivosinc.com> Signed-off-by: Samuel Holland <samuel.holland@sifive.com> Link: https://lore.kernel.org/r/20241016202814.4061541-6-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
1 parent 09d6775 commit 2e17430

File tree

5 files changed

+130
-10
lines changed

5 files changed

+130
-10
lines changed

Documentation/arch/riscv/uabi.rst

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,3 +80,7 @@ number of mask/tag bits needed by the application. ``PR_PMLEN`` is interpreted
8080
as a lower bound; if the kernel is unable to satisfy the request, the
8181
``PR_SET_TAGGED_ADDR_CTRL`` operation will fail. The actual number of tag bits
8282
is returned in ``PR_PMLEN`` by the ``PR_GET_TAGGED_ADDR_CTRL`` operation.
83+
84+
Additionally, when pointer masking is enabled (``PR_PMLEN`` is greater than 0),
85+
a tagged address ABI is supported, with the same interface and behavior as
86+
documented for AArch64 (Documentation/arch/arm64/tagged-address-abi.rst).

arch/riscv/include/asm/mmu.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,16 @@ typedef struct {
2525
#ifdef CONFIG_BINFMT_ELF_FDPIC
2626
unsigned long exec_fdpic_loadmap;
2727
unsigned long interp_fdpic_loadmap;
28+
#endif
29+
unsigned long flags;
30+
#ifdef CONFIG_RISCV_ISA_SUPM
31+
u8 pmlen;
2832
#endif
2933
} mm_context_t;
3034

35+
/* Lock the pointer masking mode because this mm is multithreaded */
36+
#define MM_CONTEXT_LOCK_PMLEN 0
37+
3138
#define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK)
3239
#define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK)
3340

arch/riscv/include/asm/mmu_context.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,9 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
2020
static inline void activate_mm(struct mm_struct *prev,
2121
struct mm_struct *next)
2222
{
23+
#ifdef CONFIG_RISCV_ISA_SUPM
24+
next->context.pmlen = 0;
25+
#endif
2326
switch_mm(prev, next, NULL);
2427
}
2528

@@ -30,11 +33,21 @@ static inline int init_new_context(struct task_struct *tsk,
3033
#ifdef CONFIG_MMU
3134
atomic_long_set(&mm->context.id, 0);
3235
#endif
36+
if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
37+
clear_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags);
3338
return 0;
3439
}
3540

3641
DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
3742

43+
#ifdef CONFIG_RISCV_ISA_SUPM
44+
#define mm_untag_mask mm_untag_mask
45+
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
46+
{
47+
return -1UL >> mm->context.pmlen;
48+
}
49+
#endif
50+
3851
#include <asm-generic/mmu_context.h>
3952

4053
#endif /* _ASM_RISCV_MMU_CONTEXT_H */

arch/riscv/include/asm/uaccess.h

Lines changed: 38 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,41 @@
99
#define _ASM_RISCV_UACCESS_H
1010

1111
#include <asm/asm-extable.h>
12+
#include <asm/cpufeature.h>
1213
#include <asm/pgtable.h> /* for TASK_SIZE */
1314

15+
#ifdef CONFIG_RISCV_ISA_SUPM
16+
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr)
17+
{
18+
if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) {
19+
u8 pmlen = mm->context.pmlen;
20+
21+
/* Virtual addresses are sign-extended; physical addresses are zero-extended. */
22+
if (IS_ENABLED(CONFIG_MMU))
23+
return (long)(addr << pmlen) >> pmlen;
24+
else
25+
return (addr << pmlen) >> pmlen;
26+
}
27+
28+
return addr;
29+
}
30+
31+
#define untagged_addr(addr) ({ \
32+
unsigned long __addr = (__force unsigned long)(addr); \
33+
(__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \
34+
})
35+
36+
#define untagged_addr_remote(mm, addr) ({ \
37+
unsigned long __addr = (__force unsigned long)(addr); \
38+
mmap_assert_locked(mm); \
39+
(__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \
40+
})
41+
42+
#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size))
43+
#else
44+
#define untagged_addr(addr) (addr)
45+
#endif
46+
1447
/*
1548
* User space memory access functions
1649
*/
@@ -130,7 +163,7 @@ do { \
130163
*/
131164
#define __get_user(x, ptr) \
132165
({ \
133-
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
166+
const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
134167
long __gu_err = 0; \
135168
\
136169
__chk_user_ptr(__gu_ptr); \
@@ -246,7 +279,7 @@ do { \
246279
*/
247280
#define __put_user(x, ptr) \
248281
({ \
249-
__typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
282+
__typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
250283
__typeof__(*__gu_ptr) __val = (x); \
251284
long __pu_err = 0; \
252285
\
@@ -293,13 +326,13 @@ unsigned long __must_check __asm_copy_from_user(void *to,
293326
static inline unsigned long
294327
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
295328
{
296-
return __asm_copy_from_user(to, from, n);
329+
return __asm_copy_from_user(to, untagged_addr(from), n);
297330
}
298331

299332
static inline unsigned long
300333
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
301334
{
302-
return __asm_copy_to_user(to, from, n);
335+
return __asm_copy_to_user(untagged_addr(to), from, n);
303336
}
304337

305338
extern long strncpy_from_user(char *dest, const char __user *src, long count);
@@ -314,7 +347,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
314347
{
315348
might_fault();
316349
return access_ok(to, n) ?
317-
__clear_user(to, n) : n;
350+
__clear_user(untagged_addr(to), n) : n;
318351
}
319352

320353
#define __get_kernel_nofault(dst, src, type, err_label) \

arch/riscv/kernel/process.c

Lines changed: 68 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -213,6 +213,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
213213
unsigned long tls = args->tls;
214214
struct pt_regs *childregs = task_pt_regs(p);
215215

216+
/* Ensure all threads in this mm have the same pointer masking mode. */
217+
if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM))
218+
set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags);
219+
216220
memset(&p->thread.s, 0, sizeof(p->thread.s));
217221

218222
/* p->thread holds context to be restored by __switch_to() */
@@ -258,10 +262,16 @@ enum {
258262
static bool have_user_pmlen_7;
259263
static bool have_user_pmlen_16;
260264

265+
/*
266+
* Control the relaxed ABI allowing tagged user addresses into the kernel.
267+
*/
268+
static unsigned int tagged_addr_disabled;
269+
261270
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
262271
{
263-
unsigned long valid_mask = PR_PMLEN_MASK;
272+
unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE;
264273
struct thread_info *ti = task_thread_info(task);
274+
struct mm_struct *mm = task->mm;
265275
unsigned long pmm;
266276
u8 pmlen;
267277

@@ -276,16 +286,41 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
276286
* in case choosing a larger PMLEN has a performance impact.
277287
*/
278288
pmlen = FIELD_GET(PR_PMLEN_MASK, arg);
279-
if (pmlen == PMLEN_0)
289+
if (pmlen == PMLEN_0) {
280290
pmm = ENVCFG_PMM_PMLEN_0;
281-
else if (pmlen <= PMLEN_7 && have_user_pmlen_7)
291+
} else if (pmlen <= PMLEN_7 && have_user_pmlen_7) {
292+
pmlen = PMLEN_7;
282293
pmm = ENVCFG_PMM_PMLEN_7;
283-
else if (pmlen <= PMLEN_16 && have_user_pmlen_16)
294+
} else if (pmlen <= PMLEN_16 && have_user_pmlen_16) {
295+
pmlen = PMLEN_16;
284296
pmm = ENVCFG_PMM_PMLEN_16;
285-
else
297+
} else {
286298
return -EINVAL;
299+
}
300+
301+
/*
302+
* Do not allow the enabling of the tagged address ABI if globally
303+
* disabled via sysctl abi.tagged_addr_disabled, if pointer masking
304+
* is disabled for userspace.
305+
*/
306+
if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
307+
return -EINVAL;
308+
309+
if (!(arg & PR_TAGGED_ADDR_ENABLE))
310+
pmlen = PMLEN_0;
311+
312+
if (mmap_write_lock_killable(mm))
313+
return -EINTR;
314+
315+
if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) {
316+
mmap_write_unlock(mm);
317+
return -EBUSY;
318+
}
287319

288320
envcfg_update_bits(task, ENVCFG_PMM, pmm);
321+
mm->context.pmlen = pmlen;
322+
323+
mmap_write_unlock(mm);
289324

290325
return 0;
291326
}
@@ -298,6 +333,10 @@ long get_tagged_addr_ctrl(struct task_struct *task)
298333
if (is_compat_thread(ti))
299334
return -EINVAL;
300335

336+
/*
337+
* The mm context's pmlen is set only when the tagged address ABI is
338+
* enabled, so the effective PMLEN must be extracted from envcfg.PMM.
339+
*/
301340
switch (task->thread.envcfg & ENVCFG_PMM) {
302341
case ENVCFG_PMM_PMLEN_7:
303342
ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7);
@@ -307,6 +346,9 @@ long get_tagged_addr_ctrl(struct task_struct *task)
307346
break;
308347
}
309348

349+
if (task->mm->context.pmlen)
350+
ret |= PR_TAGGED_ADDR_ENABLE;
351+
310352
return ret;
311353
}
312354

@@ -316,6 +358,24 @@ static bool try_to_set_pmm(unsigned long value)
316358
return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
317359
}
318360

361+
/*
362+
* Global sysctl to disable the tagged user addresses support. This control
363+
* only prevents the tagged address ABI enabling via prctl() and does not
364+
* disable it for tasks that already opted in to the relaxed ABI.
365+
*/
366+
367+
static struct ctl_table tagged_addr_sysctl_table[] = {
368+
{
369+
.procname = "tagged_addr_disabled",
370+
.mode = 0644,
371+
.data = &tagged_addr_disabled,
372+
.maxlen = sizeof(int),
373+
.proc_handler = proc_dointvec_minmax,
374+
.extra1 = SYSCTL_ZERO,
375+
.extra2 = SYSCTL_ONE,
376+
},
377+
};
378+
319379
static int __init tagged_addr_init(void)
320380
{
321381
if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
@@ -329,6 +389,9 @@ static int __init tagged_addr_init(void)
329389
have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
330390
have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);
331391

392+
if (!register_sysctl("abi", tagged_addr_sysctl_table))
393+
return -EINVAL;
394+
332395
return 0;
333396
}
334397
core_initcall(tagged_addr_init);

0 commit comments

Comments
 (0)