
Commit 2df4c9a

arndb authored and Russell King (Oracle) committed

ARM: 9112/1: uaccess: add __{get,put}_kernel_nofault

These mimic the behavior of get_user and put_user, except for domain switching, address limit checking and handling of mismatched sizes, none of which are relevant here.

To work with pre-Armv6 kernels, this has to avoid TUSER() inside of the new macros; instead, the new approach passes the "t" string along with the opcode, which is a bit uglier but avoids duplicating more code.

As there is no __get_user_asm_dword(), I work around it by copying 32 bits at a time, which is possible because the output size is known.

Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
1 parent 7e2d8c2 commit 2df4c9a
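
For orientation, here is a minimal usage sketch of the two new macros. It is not part of this commit; the helper names read_kernel_u32()/write_kernel_u32() are invented for illustration, and in-tree code normally reaches these macros through copy_from_kernel_nofault()/copy_to_kernel_nofault() in mm/maccess.c rather than invoking them directly.

/*
 * Hypothetical helpers, for illustration only: access a u32 at a kernel
 * address that may be unmapped, without letting the fault reach the caller.
 */
static int read_kernel_u32(const u32 *addr, u32 *out)
{
        u32 val;

        /* jumps to the given label if the load faults */
        __get_kernel_nofault(&val, addr, u32, fault);
        *out = val;
        return 0;
fault:
        return -EFAULT;
}

static int write_kernel_u32(u32 *addr, u32 val)
{
        /* jumps to the given label if the store faults */
        __put_kernel_nofault(addr, &val, u32, fault);
        return 0;
fault:
        return -EFAULT;
}

Unlike __get_user()/__put_user(), these kernel-only accessors pass an empty "" modifier string, so plain ldr/str instructions are emitted (no TUSER variant) and no domain switching or address-limit check is performed, as the diff below shows.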

1 file changed

arch/arm/include/asm/uaccess.h

Lines changed: 83 additions & 40 deletions
@@ -308,11 +308,11 @@ static inline void set_fs(mm_segment_t fs)
 #define __get_user(x, ptr) \
 ({ \
         long __gu_err = 0; \
-        __get_user_err((x), (ptr), __gu_err); \
+        __get_user_err((x), (ptr), __gu_err, TUSER()); \
         __gu_err; \
 })
 
-#define __get_user_err(x, ptr, err) \
+#define __get_user_err(x, ptr, err, __t) \
 do { \
         unsigned long __gu_addr = (unsigned long)(ptr); \
         unsigned long __gu_val; \
@@ -321,18 +321,19 @@ do { \
         might_fault(); \
         __ua_flags = uaccess_save_and_enable(); \
         switch (sizeof(*(ptr))) { \
-        case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
-        case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
-        case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
+        case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
+        case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
+        case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
         default: (__gu_val) = __get_user_bad(); \
         } \
         uaccess_restore(__ua_flags); \
         (x) = (__typeof__(*(ptr)))__gu_val; \
 } while (0)
+#endif
 
 #define __get_user_asm(x, addr, err, instr) \
         __asm__ __volatile__( \
-        "1: " TUSER(instr) " %1, [%2], #0\n" \
+        "1: " instr " %1, [%2], #0\n" \
         "2:\n" \
         " .pushsection .text.fixup,\"ax\"\n" \
         " .align 2\n" \
@@ -348,40 +349,38 @@ do { \
         : "r" (addr), "i" (-EFAULT) \
         : "cc")
 
-#define __get_user_asm_byte(x, addr, err) \
-        __get_user_asm(x, addr, err, ldrb)
+#define __get_user_asm_byte(x, addr, err, __t) \
+        __get_user_asm(x, addr, err, "ldrb" __t)
 
 #if __LINUX_ARM_ARCH__ >= 6
 
-#define __get_user_asm_half(x, addr, err) \
-        __get_user_asm(x, addr, err, ldrh)
+#define __get_user_asm_half(x, addr, err, __t) \
+        __get_user_asm(x, addr, err, "ldrh" __t)
 
 #else
 
 #ifndef __ARMEB__
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
 ({ \
         unsigned long __b1, __b2; \
-        __get_user_asm_byte(__b1, __gu_addr, err); \
-        __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+        __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+        __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
         (x) = __b1 | (__b2 << 8); \
 })
 #else
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
 ({ \
         unsigned long __b1, __b2; \
-        __get_user_asm_byte(__b1, __gu_addr, err); \
-        __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+        __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+        __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
         (x) = (__b1 << 8) | __b2; \
 })
 #endif
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
-#define __get_user_asm_word(x, addr, err) \
-        __get_user_asm(x, addr, err, ldr)
-#endif
-
+#define __get_user_asm_word(x, addr, err, __t) \
+        __get_user_asm(x, addr, err, "ldr" __t)
 
 #define __put_user_switch(x, ptr, __err, __fn) \
         do { \
@@ -425,17 +424,19 @@ do { \
 #define __put_user_nocheck(x, __pu_ptr, __err, __size) \
 do { \
         unsigned long __pu_addr = (unsigned long)__pu_ptr; \
-        __put_user_nocheck_##__size(x, __pu_addr, __err); \
+        __put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
 } while (0)
 
 #define __put_user_nocheck_1 __put_user_asm_byte
 #define __put_user_nocheck_2 __put_user_asm_half
 #define __put_user_nocheck_4 __put_user_asm_word
 #define __put_user_nocheck_8 __put_user_asm_dword
 
+#endif /* !CONFIG_CPU_SPECTRE */
+
 #define __put_user_asm(x, __pu_addr, err, instr) \
         __asm__ __volatile__( \
-        "1: " TUSER(instr) " %1, [%2], #0\n" \
+        "1: " instr " %1, [%2], #0\n" \
         "2:\n" \
         " .pushsection .text.fixup,\"ax\"\n" \
         " .align 2\n" \
@@ -450,36 +451,36 @@ do { \
         : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
         : "cc")
 
-#define __put_user_asm_byte(x, __pu_addr, err) \
-        __put_user_asm(x, __pu_addr, err, strb)
+#define __put_user_asm_byte(x, __pu_addr, err, __t) \
+        __put_user_asm(x, __pu_addr, err, "strb" __t)
 
 #if __LINUX_ARM_ARCH__ >= 6
 
-#define __put_user_asm_half(x, __pu_addr, err) \
-        __put_user_asm(x, __pu_addr, err, strh)
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+        __put_user_asm(x, __pu_addr, err, "strh" __t)
 
 #else
 
 #ifndef __ARMEB__
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
 ({ \
         unsigned long __temp = (__force unsigned long)(x); \
-        __put_user_asm_byte(__temp, __pu_addr, err); \
-        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
+        __put_user_asm_byte(__temp, __pu_addr, err, __t); \
+        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
 })
 #else
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
 ({ \
         unsigned long __temp = (__force unsigned long)(x); \
-        __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
-        __put_user_asm_byte(__temp, __pu_addr + 1, err); \
+        __put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
+        __put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
 })
 #endif
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
-#define __put_user_asm_word(x, __pu_addr, err) \
-        __put_user_asm(x, __pu_addr, err, str)
+#define __put_user_asm_word(x, __pu_addr, err, __t) \
+        __put_user_asm(x, __pu_addr, err, "str" __t)
 
 #ifndef __ARMEB__
 #define __reg_oper0 "%R2"
@@ -489,12 +490,12 @@ do { \
 #define __reg_oper1 "%R2"
 #endif
 
-#define __put_user_asm_dword(x, __pu_addr, err) \
+#define __put_user_asm_dword(x, __pu_addr, err, __t) \
         __asm__ __volatile__( \
- ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
- ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
- THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
- THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
+ ARM( "1: str" __t " " __reg_oper1 ", [%1], #4\n" ) \
+ ARM( "2: str" __t " " __reg_oper0 ", [%1]\n" ) \
+ THUMB( "1: str" __t " " __reg_oper1 ", [%1]\n" ) \
+ THUMB( "2: str" __t " " __reg_oper0 ", [%1, #4]\n" ) \
         "3:\n" \
         " .pushsection .text.fixup,\"ax\"\n" \
         " .align 2\n" \
@@ -510,7 +511,49 @@ do { \
         : "r" (x), "i" (-EFAULT) \
         : "cc")
 
-#endif /* !CONFIG_CPU_SPECTRE */
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+        const type *__pk_ptr = (src); \
+        unsigned long __src = (unsigned long)(__pk_ptr); \
+        type __val; \
+        int __err = 0; \
+        switch (sizeof(type)) { \
+        case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \
+        case 2: __get_user_asm_half(__val, __src, __err, ""); break; \
+        case 4: __get_user_asm_word(__val, __src, __err, ""); break; \
+        case 8: { \
+                u32 *__v32 = (u32*)&__val; \
+                __get_user_asm_word(__v32[0], __src, __err, ""); \
+                if (__err) \
+                        break; \
+                __get_user_asm_word(__v32[1], __src+4, __err, ""); \
+                break; \
+        } \
+        default: __err = __get_user_bad(); break; \
+        } \
+        *(type *)(dst) = __val; \
+        if (__err) \
+                goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+        const type *__pk_ptr = (dst); \
+        unsigned long __dst = (unsigned long)__pk_ptr; \
+        int __err = 0; \
+        type __val = *(type *)src; \
+        switch (sizeof(type)) { \
+        case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
+        case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
+        case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \
+        case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \
+        default: __err = __put_user_bad(); break; \
+        } \
+        if (__err) \
+                goto err_label; \
+} while (0)
 
 #ifdef CONFIG_MMU
 extern unsigned long __must_check