|
8 | 8 | #include <asm/mmu_context.h>
|
9 | 9 | #include <asm/errno.h>
|
10 | 10 |
|
/*
 * FUTEX_OP_FUNC(name, insn) - generate __futex_atomic_<name>(), an
 * inline helper that atomically applies <insn> to the user-space
 * futex word at @uaddr.
 *
 * Mechanics (visible in the asm below):
 *  - "0:" loads the current value of *uaddr into [old];
 *  - "1:" <insn> computes the updated value in [new] from [old]
 *    and/or [oparg];
 *  - "2:" COMPARE AND SWAP ("cs") installs [new] only if *uaddr still
 *    equals [old]; on mismatch (condition code "low") "3: jl 1b"
 *    retries the insn+cs sequence with the refreshed [old];
 *  - on success "lhi %[rc],0" clears the return code;
 *  - "sacf 256" / "4: sacf 768" switch the address-space-control mode
 *    around the user access and restore it afterwards
 *    (NOTE(review): exact ASC semantics follow the s390 uaccess
 *    convention — confirm against arch/s390 uaccess code);
 *  - each potentially faulting instruction (labels 0..3) has an
 *    EX_TABLE_UA_FAULT entry targeting "4:", so a fault on the user
 *    page stores an error code into [rc] and resumes after the
 *    address-space mode is restored.
 *
 * @oparg: operand for <insn>
 * @old:   out - the futex word's value before the update
 * @uaddr: user-space address of the futex word
 *
 * Returns 0 on success, negative error code on fault.
 */
#define FUTEX_OP_FUNC(name, insn)					\
static inline int							\
__futex_atomic_##name(int oparg, int *old, u32 __user *uaddr)		\
{									\
	int rc, new;							\
									\
	asm_inline volatile(						\
		" sacf 256\n"						\
		"0: l %[old],%[uaddr]\n"				\
		"1:"insn						\
		"2: cs %[old],%[new],%[uaddr]\n"			\
		"3: jl 1b\n"						\
		" lhi %[rc],0\n"					\
		"4: sacf 768\n"						\
		EX_TABLE_UA_FAULT(0b, 4b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 4b, %[rc])			\
		EX_TABLE_UA_FAULT(2b, 4b, %[rc])			\
		EX_TABLE_UA_FAULT(3b, 4b, %[rc])			\
		: [rc] "=d" (rc), [old] "=&d" (*old),			\
		  [new] "=&d" (new), [uaddr] "+Q" (*uaddr)		\
		: [oparg] "d" (oparg)					\
		: "cc");						\
	return rc;							\
}

/* new = oparg */
FUTEX_OP_FUNC(set, "lr %[new],%[oparg]\n")
/* new = old + oparg */
FUTEX_OP_FUNC(add, "lr %[new],%[old]\n ar %[new],%[oparg]\n")
/* new = old | oparg */
FUTEX_OP_FUNC(or, "lr %[new],%[old]\n or %[new],%[oparg]\n")
/* new = old & oparg ("nr" is bitwise AND) */
FUTEX_OP_FUNC(and, "lr %[new],%[old]\n nr %[new],%[oparg]\n")
/* new = old ^ oparg */
FUTEX_OP_FUNC(xor, "lr %[new],%[old]\n xr %[new],%[oparg]\n")
26 | 41 |
|
27 |
| -static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, |
28 |
| - u32 __user *uaddr) |
| 42 | +static inline |
| 43 | +int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) |
29 | 44 | {
|
30 |
| - int oldval = 0, newval, ret; |
| 45 | + int old, rc; |
31 | 46 |
|
32 | 47 | switch (op) {
|
33 | 48 | case FUTEX_OP_SET:
|
34 |
| - __futex_atomic_op("lr %2,%5\n", |
35 |
| - ret, oldval, newval, uaddr, oparg); |
| 49 | + rc = __futex_atomic_set(oparg, &old, uaddr); |
36 | 50 | break;
|
37 | 51 | case FUTEX_OP_ADD:
|
38 |
| - __futex_atomic_op("lr %2,%1\nar %2,%5\n", |
39 |
| - ret, oldval, newval, uaddr, oparg); |
| 52 | + rc = __futex_atomic_add(oparg, &old, uaddr); |
40 | 53 | break;
|
41 | 54 | case FUTEX_OP_OR:
|
42 |
| - __futex_atomic_op("lr %2,%1\nor %2,%5\n", |
43 |
| - ret, oldval, newval, uaddr, oparg); |
| 55 | + rc = __futex_atomic_or(oparg, &old, uaddr); |
44 | 56 | break;
|
45 | 57 | case FUTEX_OP_ANDN:
|
46 |
| - __futex_atomic_op("lr %2,%1\nnr %2,%5\n", |
47 |
| - ret, oldval, newval, uaddr, ~oparg); |
| 58 | + rc = __futex_atomic_and(~oparg, &old, uaddr); |
48 | 59 | break;
|
49 | 60 | case FUTEX_OP_XOR:
|
50 |
| - __futex_atomic_op("lr %2,%1\nxr %2,%5\n", |
51 |
| - ret, oldval, newval, uaddr, oparg); |
| 61 | + rc = __futex_atomic_xor(oparg, &old, uaddr); |
52 | 62 | break;
|
53 | 63 | default:
|
54 |
| - ret = -ENOSYS; |
| 64 | + rc = -ENOSYS; |
55 | 65 | }
|
56 |
| - |
57 |
| - if (!ret) |
58 |
| - *oval = oldval; |
59 |
| - |
60 |
| - return ret; |
| 66 | + if (!rc) |
| 67 | + *oval = old; |
| 68 | + return rc; |
61 | 69 | }
|
62 | 70 |
|
63 | 71 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|
0 commit comments