  6 |   6 |  * (C) Copyright 2013-2014,2018 Red Hat, Inc.
  7 |   7 |  * (C) Copyright 2015 Intel Corp.
  8 |   8 |  * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
    |   9 | + * (C) Copyright 2024-2025 Meta Platforms, Inc. and affiliates.
  9 |  10 |  *
 10 |  11 |  * Authors: Waiman Long <longman@redhat.com>
 11 |  12 |  *          Peter Zijlstra <peterz@infradead.org>
    |  13 | + *          Kumar Kartikeya Dwivedi <memxor@gmail.com>
 12 |  14 |  */
 13 |  15 |
 14 |  16 | #include <linux/smp.h>
 22 |  24 | #include <asm/qspinlock.h>
 23 |  25 | #include <trace/events/lock.h>
 24 |  26 | #include <asm/rqspinlock.h>
    |  27 | +#include <linux/timekeeping.h>
 25 |  28 |
 26 |  29 | /*
 27 |  30 |  * Include queued spinlock definitions and statistics code
 68 |  71 |
 69 |  72 | #include "../locking/mcs_spinlock.h"
 70 |  73 |
    |  74 | +struct rqspinlock_timeout {
    |  75 | +        u64 timeout_end;
    |  76 | +        u64 duration;
    |  77 | +        u16 spin;
    |  78 | +};
    |  79 | +
    |  80 | +static noinline int check_timeout(struct rqspinlock_timeout *ts)
    |  81 | +{
    |  82 | +        u64 time = ktime_get_mono_fast_ns();
    |  83 | +
    |  84 | +        if (!ts->timeout_end) {
    |  85 | +                ts->timeout_end = time + ts->duration;
    |  86 | +                return 0;
    |  87 | +        }
    |  88 | +
    |  89 | +        if (time > ts->timeout_end)
    |  90 | +                return -ETIMEDOUT;
    |  91 | +
    |  92 | +        return 0;
    |  93 | +}
    |  94 | +
    |  95 | +#define RES_CHECK_TIMEOUT(ts, ret)                            \
    |  96 | +        ({                                                    \
    |  97 | +                if (!(ts).spin++)                             \
    |  98 | +                        (ret) = check_timeout(&(ts));         \
    |  99 | +                (ret);                                        \
    | 100 | +        })
    | 101 | +
    | 102 | +/*
    | 103 | + * Initialize the 'spin' member.
    | 104 | + */
    | 105 | +#define RES_INIT_TIMEOUT(ts) ({ (ts).spin = 1; })
    | 106 | +
    | 107 | +/*
    | 108 | + * We only need to reset 'timeout_end', 'spin' will just wrap around as necessary.
    | 109 | + * Duration is defined for each spin attempt, so set it here.
    | 110 | + */
    | 111 | +#define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; })
    | 112 | +
 71 | 113 | /*
 72 | 114 |  * Per-CPU queue node structures; we can never have more than 4 nested
 73 | 115 |  * contexts: task, softirq, hardirq, nmi.
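The helpers added above are built to stay off the hot path: RES_INIT_TIMEOUT() seeds the 16-bit 'spin' counter once per acquisition, RES_RESET_TIMEOUT() clears the deadline and records the duration for each waiting phase, and RES_CHECK_TIMEOUT() only calls check_timeout() when the counter wraps (roughly once every 64K spins), with check_timeout() arming the deadline lazily on its first call. A minimal userspace sketch of the same pattern follows, assuming a hypothetical timeout_state struct, a mono_ns() wrapper around clock_gettime(), and an arbitrary 100 ms duration; none of these names or values come from this patch.

/*
 * Userspace sketch of the amortized-timeout pattern above (not kernel code):
 * the deadline is armed lazily on the first check, and the clock is sampled
 * only when the 16-bit spin counter wraps, so the common spin iteration is
 * just an increment and a branch.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct timeout_state {
        uint64_t timeout_end;
        uint64_t duration;
        uint16_t spin;
};

static uint64_t mono_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static int check_timeout(struct timeout_state *ts)
{
        uint64_t now = mono_ns();

        if (!ts->timeout_end) {         /* first check: arm the deadline */
                ts->timeout_end = now + ts->duration;
                return 0;
        }
        return now > ts->timeout_end ? -ETIMEDOUT : 0;
}

int main(void)
{
        struct timeout_state ts = { .spin = 1 };        /* like RES_INIT_TIMEOUT */
        volatile int locked = 1;                        /* never released: forces a timeout */
        int ret = 0;

        ts.timeout_end = 0;                             /* like RES_RESET_TIMEOUT(ts, 100ms) */
        ts.duration = 100 * 1000 * 1000;

        while (locked) {
                if (!ts.spin++)                         /* like RES_CHECK_TIMEOUT: amortized clock read */
                        ret = check_timeout(&ts);
                if (ret)
                        break;
        }
        printf("spin loop ended with %d (%s)\n", ret,
               ret == -ETIMEDOUT ? "-ETIMEDOUT" : "ok");
        return 0;
}

The point being illustrated is the design choice behind the u16 counter: the monotonic clock is read at most once per 65536 iterations, so the waiting loop's common case stays cheap even though every phase remains bounded by a deadline.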
@@ -100,11 +142,14 @@ static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);
100 | 142 | void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
101 | 143 | {
102 | 144 |         struct mcs_spinlock *prev, *next, *node;
    | 145 | +        struct rqspinlock_timeout ts;
103 | 146 |         u32 old, tail;
104 | 147 |         int idx;
105 | 148 |
106 | 149 |         BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
107 | 150 |
    | 151 | +        RES_INIT_TIMEOUT(ts);
    | 152 | +
108 | 153 |         /*
109 | 154 |          * Wait for in-progress pending->locked hand-overs with a bounded
110 | 155 |          * number of spins so that we guarantee forward progress.
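This hunk only declares the per-acquisition timeout state and seeds it with RES_INIT_TIMEOUT(); the waiting loops that actually consume the deadline fall outside the visible context. Purely as an illustration of how the pieces compose (these are not lines from this diff), a deadline-bounded version of the pending->locked hand-over wait mentioned in the comment could look roughly like the sketch below, where RES_DEF_TIMEOUT is a placeholder duration constant that this hunk does not define.

        int ret = 0;

        /*
         * Illustrative sketch only: arm a fresh deadline for this waiting
         * phase, then spin until the pending bit clears or the deadline
         * expires. RES_DEF_TIMEOUT is a placeholder, not part of this diff.
         */
        RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
        while ((atomic_read(&lock->val) & _Q_PENDING_MASK) &&
               !RES_CHECK_TIMEOUT(ts, ret))
                cpu_relax();
        if (ret == -ETIMEDOUT) {
                /* bail out of the slowpath instead of spinning forever */
        }

The property this pattern gives is that each waiting phase gets its own deadline via RES_RESET_TIMEOUT(), consistent with the comment "Duration is defined for each spin attempt, so set it here", so no single phase can stall indefinitely.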