 #include <linux/rtmutex.h>
 #include <linux/sched/wake_q.h>
 #include <linux/compat.h>
+#include <linux/uaccess.h>
 
 #ifdef CONFIG_PREEMPT_RT
 #include <linux/rcuwait.h>
@@ -225,10 +226,64 @@ extern bool __futex_wake_mark(struct futex_q *q);
 extern void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q);
 
 extern int fault_in_user_writeable(u32 __user *uaddr);
-extern int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval);
-extern int futex_get_value_locked(u32 *dest, u32 __user *from);
 extern struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key);
 
+static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
+{
+	int ret;
+
+	pagefault_disable();
+	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
+	pagefault_enable();
+
+	return ret;
+}
+
+/*
+ * This does a plain atomic user space read, and the user pointer has
+ * already been verified earlier by get_futex_key() to be both aligned
+ * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
+ *
+ * We still want to avoid any speculation, and while __get_user() is
+ * the traditional model for this, it's actually slower than doing
+ * this manually these days.
+ *
+ * We could just have a per-architecture special function for it,
+ * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
+ * than force everybody to do that, write it out long-hand using
+ * the low-level user-access infrastructure.
+ *
+ * This looks a bit overkill, but generally just results in a couple
+ * of instructions.
+ */
+static __always_inline int futex_read_inatomic(u32 *dest, u32 __user *from)
+{
+	u32 val;
+
+	if (can_do_masked_user_access())
+		from = masked_user_access_begin(from);
+	else if (!user_read_access_begin(from, sizeof(*from)))
+		return -EFAULT;
+	unsafe_get_user(val, from, Efault);
+	user_access_end();
+	*dest = val;
+	return 0;
+Efault:
+	user_access_end();
+	return -EFAULT;
+}
+
+static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
+{
+	int ret;
+
+	pagefault_disable();
+	ret = futex_read_inatomic(dest, from);
+	pagefault_enable();
+
+	return ret;
+}
+
 extern void __futex_unqueue(struct futex_q *q);
 extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb);
 extern int futex_unqueue(struct futex_q *q);
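
For context: both helpers run with pagefaults disabled, so they return -EFAULT instead of sleeping on a fault, and the caller is expected to fault the page in and retry. Below is a minimal, illustrative sketch of that retry pattern, not part of the patch; the wrapper name is hypothetical, and the real call sites in kernel/futex/ also drop the hash-bucket lock before faulting.

/* Illustrative sketch only; example_read_futex_word() is a hypothetical caller. */
static int example_read_futex_word(u32 __user *uaddr, u32 *val)
{
	int ret;

retry:
	/* Atomic read with pagefaults disabled; -EFAULT means the page was not present. */
	ret = futex_get_value_locked(val, uaddr);
	if (ret) {
		/* Fault the page in (writably, as futexes require) and try again. */
		if (fault_in_user_writeable(uaddr))
			return -EFAULT;
		goto retry;
	}
	return 0;
}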