
Commit 563adbf

david-laight authored and torvalds committed
locking/osq_lock: Clarify osq_wait_next() calling convention
osq_wait_next() is passed 'prev' from osq_lock() and NULL from osq_unlock() but only needs the 'cpu' value to write to lock->tail. Just pass prev->cpu or OSQ_UNLOCKED_VAL instead.

Should have no effect on the generated code since gcc manages to assume that 'prev != NULL' due to an earlier dereference.

Signed-off-by: David Laight <david.laight@aculab.com>
[ Changed 'old' to 'old_cpu' by request from Waiman Long - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 7c22309 commit 563adbf
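
To make the change easier to follow in isolation, here is a minimal, self-contained C11 sketch of the tail handoff that osq_wait_next() performs, showing why an integer CPU value is all the callee needs. This is a toy model, not kernel code: the names toy_tail and toy_wait_next() and the simplified behaviour are invented for illustration, and the real function also has to stabilize node->next, which is omitted here.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for OSQ_UNLOCKED_VAL: 0 means "queue empty". */
#define TOY_UNLOCKED_VAL 0

/* Toy stand-in for lock->tail: holds the encoded CPU of the last queued node. */
static atomic_int toy_tail = TOY_UNLOCKED_VAL;

/*
 * Toy version of the new calling convention: the caller passes the encoded
 * CPU number that the tail should fall back to (prev->cpu when cancelling a
 * queued osq_lock(), TOY_UNLOCKED_VAL when unlocking) instead of a node
 * pointer that the callee would only dereference for its ->cpu field.
 */
static int toy_wait_next(int curr_cpu, int old_cpu)
{
	int expected = curr_cpu;

	/*
	 * If we are still the tail, move the tail back to old_cpu and report
	 * that there is no successor to hand off to.
	 */
	if (atomic_compare_exchange_strong(&toy_tail, &expected, old_cpu))
		return 0;

	/* Otherwise a successor exists; the real code would wait for ->next. */
	return 1;
}

int main(void)
{
	int curr = 2;	/* pretend we are encoded CPU 2 */
	int prev = 1;	/* pretend our predecessor is encoded CPU 1 */
	int succ;

	/* Cancel path: we are the tail, so fall back to the predecessor. */
	atomic_store(&toy_tail, curr);
	succ = toy_wait_next(curr, prev);
	printf("cancel: successor=%d tail=%d\n", succ, atomic_load(&toy_tail));

	/* Unlock path: we are the tail, so fall back to "queue empty". */
	atomic_store(&toy_tail, curr);
	succ = toy_wait_next(curr, TOY_UNLOCKED_VAL);
	printf("unlock: successor=%d tail=%d\n", succ, atomic_load(&toy_tail));
	return 0;
}

Compiled with a C11 compiler this reports no successor in both cases, with the tail restored to 1 (the predecessor) and 0 (empty) respectively, mirroring the two call sites changed in the diff below.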

1 file changed, +9 −12 lines changed


kernel/locking/osq_lock.c

Lines changed: 9 additions & 12 deletions
@@ -44,26 +44,23 @@ static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
+ *
+ * If osq_lock() is being cancelled there must be a previous node
+ * and 'old_cpu' is its CPU #.
+ * For osq_unlock() there is never a previous node and old_cpu is
+ * set to OSQ_UNLOCKED_VAL.
  */
 static inline struct optimistic_spin_node *
 osq_wait_next(struct optimistic_spin_queue *lock,
 	      struct optimistic_spin_node *node,
-	      struct optimistic_spin_node *prev)
+	      int old_cpu)
 {
 	struct optimistic_spin_node *next = NULL;
 	int curr = encode_cpu(smp_processor_id());
-	int old;
-
-	/*
-	 * If there is a prev node in queue, then the 'old' value will be
-	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
-	 * we're currently last in queue, then the queue will then become empty.
-	 */
-	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
 	for (;;) {
 		if (atomic_read(&lock->tail) == curr &&
-		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
+		    atomic_cmpxchg_acquire(&lock->tail, curr, old_cpu) == curr) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
@@ -193,7 +190,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * back to @prev.
 	 */
 
-	next = osq_wait_next(lock, node, prev);
+	next = osq_wait_next(lock, node, prev->cpu);
 	if (!next)
 		return false;
 
@@ -233,7 +230,7 @@ void osq_unlock(struct optimistic_spin_queue *lock)
 		return;
 	}
 
-	next = osq_wait_next(lock, node, NULL);
+	next = osq_wait_next(lock, node, OSQ_UNLOCKED_VAL);
 	if (next)
 		WRITE_ONCE(next->locked, 1);
 }
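
For context on why a plain int works at both call sites: lock->tail holds an encoded CPU number rather than a node pointer, with OSQ_UNLOCKED_VAL (0) meaning "no CPU" and real CPUs stored as cpu_nr + 1. This is my reading of the encode_cpu()/decode_cpu() helpers visible in the hunk header above, so treat the sketch below as a paraphrase under that assumption, with the kernel's per-CPU lookup replaced by a plain array:

#include <stdio.h>

#define SKETCH_MAX_CPUS     4
#define SKETCH_UNLOCKED_VAL 0	/* stands in for OSQ_UNLOCKED_VAL */

struct sketch_node {
	int cpu;		/* encoded CPU number, as in the real node */
};

/* Stand-in for the per-CPU osq_node instances. */
static struct sketch_node sketch_nodes[SKETCH_MAX_CPUS];

/* 0 means "no CPU", so encoded values are the CPU number plus one. */
static int sketch_encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static struct sketch_node *sketch_decode_cpu(int encoded_cpu_val)
{
	return &sketch_nodes[encoded_cpu_val - 1];
}

int main(void)
{
	int enc = sketch_encode_cpu(2);

	sketch_nodes[2].cpu = enc;

	/*
	 * prev->cpu is already in the encoded form that lock->tail stores,
	 * which is why the new calling convention can pass it straight through.
	 */
	printf("cpu 2 encodes to %d, decodes back to node with cpu field %d\n",
	       enc, sketch_decode_cpu(enc)->cpu);
	return 0;
}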
