 21 |  21 |  #include <linux/mutex.h>
 22 |  22 |  #include <linux/prefetch.h>
 23 |  23 |  #include <asm/byteorder.h>
    |  24 | +#ifdef CONFIG_QUEUED_SPINLOCKS
 24 |  25 |  #include <asm/qspinlock.h>
    |  26 | +#endif
 25 |  27 |  #include <trace/events/lock.h>
 26 |  28 |  #include <asm/rqspinlock.h>
 27 |  29 |  #include <linux/timekeeping.h>
 28 |  30 | 
 29 |  31 |  /*
 30 |  32 |   * Include queued spinlock definitions and statistics code
 31 |  33 |   */
    |  34 | +#ifdef CONFIG_QUEUED_SPINLOCKS
 32 |  35 |  #include "../locking/qspinlock.h"
 33 |  36 |  #include "../locking/lock_events.h"
 34 |  37 |  #include "rqspinlock.h"
    |  38 | +#include "../locking/mcs_spinlock.h"
    |  39 | +#endif
 35 |  40 | 
 36 |  41 |  /*
 37 |  42 |   * The basic principle of a queue-based spinlock can best be understood
    |     |  ... (unchanged lines omitted)
 70 |  75 |   *
 71 |  76 |   */
 72 |  77 | 
 73 |     | -#include "../locking/mcs_spinlock.h"
 74 |     | -
 75 |  78 |  struct rqspinlock_timeout {
 76 |  79 |          u64 timeout_end;
 77 |  80 |          u64 duration;

@@ -263,6 +266,43 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
263 | 266 |   */
264 | 267 |  #define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; })
265 | 268 | 
    | 269 | +/*
    | 270 | + * Provide a test-and-set fallback for cases when queued spin lock support is
    | 271 | + * absent from the architecture.
    | 272 | + */
    | 273 | +int __lockfunc resilient_tas_spin_lock(rqspinlock_t *lock)
    | 274 | +{
    | 275 | +        struct rqspinlock_timeout ts;
    | 276 | +        int val, ret = 0;
    | 277 | +
    | 278 | +        RES_INIT_TIMEOUT(ts);
    | 279 | +        grab_held_lock_entry(lock);
    | 280 | +
    | 281 | +        /*
    | 282 | +         * Since the time spent in the waiting loop depends on the amount of
    | 283 | +         * contention, a short timeout like the ones used by the rqspinlock
    | 284 | +         * waiting loops isn't enough. Choose a second as the timeout value.
    | 285 | +         */
    | 286 | +        RES_RESET_TIMEOUT(ts, NSEC_PER_SEC);
    | 287 | +retry:
    | 288 | +        val = atomic_read(&lock->val);
    | 289 | +
    | 290 | +        if (val || !atomic_try_cmpxchg(&lock->val, &val, 1)) {
    | 291 | +                if (RES_CHECK_TIMEOUT(ts, ret, ~0u))
    | 292 | +                        goto out;
    | 293 | +                cpu_relax();
    | 294 | +                goto retry;
    | 295 | +        }
    | 296 | +
    | 297 | +        return 0;
    | 298 | +out:
    | 299 | +        release_held_lock_entry();
    | 300 | +        return ret;
    | 301 | +}
    | 302 | +EXPORT_SYMBOL_GPL(resilient_tas_spin_lock);
    | 303 | +
    | 304 | +#ifdef CONFIG_QUEUED_SPINLOCKS
    | 305 | +
266 | 306 |  /*
267 | 307 |   * Per-CPU queue node structures; we can never have more than 4 nested
268 | 308 |   * contexts: task, softirq, hardirq, nmi.

@@ -616,3 +656,5 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
616 | 656 |          return ret;
617 | 657 |  }
618 | 658 |  EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
    | 659 | +
    | 660 | +#endif /* CONFIG_QUEUED_SPINLOCKS */
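
For readers tracing how the new fallback is reached: this diff only adds the test-and-set slowpath and the CONFIG_QUEUED_SPINLOCKS guards; the generic entry point that picks between the queued slowpath and the fallback lives in the rqspinlock header, which is not part of this commit. Below is a minimal sketch of that dispatch, assuming a res_spin_lock()-style wrapper; apart from resilient_tas_spin_lock(), resilient_queued_spin_lock_slowpath(), and grab_held_lock_entry(), which appear in this file, the wrapper's name and fast-path details are assumptions for illustration.

/*
 * Hedged sketch (not part of this diff): a generic locking entry point that
 * takes the uncontended fast path when possible, enters the queued slowpath
 * when CONFIG_QUEUED_SPINLOCKS is available, and otherwise uses the
 * test-and-set fallback added above. The wrapper name is illustrative.
 */
static __always_inline int res_spin_lock(rqspinlock_t *lock)
{
#ifdef CONFIG_QUEUED_SPINLOCKS
        int val = 0;

        /* Uncontended fast path: 0 -> locked, then record the held lock. */
        if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, 1))) {
                grab_held_lock_entry(lock);
                return 0;
        }
        return resilient_queued_spin_lock_slowpath(lock, val);
#else
        /* No queued spinlock support on this architecture. */
        return resilient_tas_spin_lock(lock);
#endif
}

In either configuration the caller must check the return value: as the fallback above shows, a non-zero result means the lock was not acquired (the held-lock entry has already been released), so the critical section must not be entered.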