Commit 151647a

Merge tag 'locking-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 - Jump label fixes, including a perf events fix that originally
   manifested as jump label failures, but was a serialization bug at
   the usage site

 - Mark down_write*() helpers as __always_inline, to improve WCHAN
   debuggability

 - Misc cleanups and fixes

* tag 'locking-core-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Add __always_inline annotation to __down_write_common() and inlined callers
  jump_label: Simplify and clarify static_key_fast_inc_cpus_locked()
  jump_label: Clarify condition in static_key_fast_inc_not_disabled()
  jump_label: Fix concurrency issues in static_key_slow_dec()
  perf/x86: Serialize set_attr_rdpmc()
  cleanup: Standardize the header guard define's name
2 parents 923a327 + e81859f commit 151647a

File tree

arch/x86/events/core.c
include/linux/cleanup.h
kernel/jump_label.c
kernel/locking/rwsem.c

4 files changed: +55 -34 lines changed

arch/x86/events/core.c

Lines changed: 3 additions & 0 deletions

@@ -2547,6 +2547,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 				struct device_attribute *attr,
 				const char *buf, size_t count)
 {
+	static DEFINE_MUTEX(rdpmc_mutex);
 	unsigned long val;
 	ssize_t ret;
 
@@ -2560,6 +2561,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (x86_pmu.attr_rdpmc_broken)
 		return -ENOTSUPP;
 
+	guard(mutex)(&rdpmc_mutex);
+
 	if (val != x86_pmu.attr_rdpmc) {
 		/*
 		 * Changing into or out of never available or always available,
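
The fix serializes concurrent writers of the rdpmc sysfs attribute: a function-local static DEFINE_MUTEX() provides the lock, and guard(mutex)() from <linux/cleanup.h> drops it automatically on every return path. Below is a minimal sketch of the same pattern; the handler and mutex names (set_attr_example, example_mutex) are illustrative, not the actual perf code.

/*
 * Hypothetical sysfs store handler showing the pattern used in
 * set_attr_rdpmc(): a function-local mutex plus a scope-based guard,
 * so every early return releases the lock automatically.
 */
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

static ssize_t set_attr_example(struct device *cdev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	static DEFINE_MUTEX(example_mutex);	/* illustrative name */
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	guard(mutex)(&example_mutex);	/* unlocked on every return below */

	/* ... read-modify-write of shared state now runs fully serialized ... */
	return count;
}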

include/linux/cleanup.h

Lines changed: 3 additions & 3 deletions

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_GUARDS_H
-#define __LINUX_GUARDS_H
+#ifndef _LINUX_CLEANUP_H
+#define _LINUX_CLEANUP_H
 
 #include <linux/compiler.h>
 
@@ -250,4 +250,4 @@ __DEFINE_LOCK_GUARD_0(_name, _lock)
 { return class_##_name##_lock_ptr(_T); }
 
 
-#endif /* __LINUX_GUARDS_H */
+#endif /* _LINUX_CLEANUP_H */
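
The header's contents are unchanged; the guard define is simply renamed to match the file name. The facility it provides (scope-based cleanup via __free(), guard() and friends) is built on the compiler's cleanup attribute. A minimal user-space sketch of that underlying mechanism, not the kernel macros themselves, for illustration:

#include <stdio.h>
#include <stdlib.h>

/* Callback run automatically when the annotated variable leaves scope. */
static void free_buf(char **p)
{
	free(*p);
}

int main(void)
{
	/* GCC/Clang cleanup attribute: the primitive that cleanup.h wraps. */
	__attribute__((cleanup(free_buf))) char *buf = malloc(64);

	if (!buf)
		return 1;

	snprintf(buf, 64, "released automatically at end of scope");
	puts(buf);
	return 0;	/* free_buf(&buf) is invoked here by the compiler */
}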

kernel/jump_label.c

Lines changed: 46 additions & 28 deletions

@@ -131,13 +131,16 @@ bool static_key_fast_inc_not_disabled(struct static_key *key)
 	STATIC_KEY_CHECK_USE(key);
 	/*
 	 * Negative key->enabled has a special meaning: it sends
-	 * static_key_slow_inc() down the slow path, and it is non-zero
-	 * so it counts as "enabled" in jump_label_update(). Note that
-	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update().
+	 *
+	 * The INT_MAX overflow condition is either used by the networking
+	 * code to reset or detected in the slow path of
+	 * static_key_slow_inc_cpuslocked().
 	 */
 	v = atomic_read(&key->enabled);
 	do {
-		if (v <= 0 || (v + 1) < 0)
+		if (v <= 0 || v == INT_MAX)
 			return false;
 	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
 
@@ -150,7 +153,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	lockdep_assert_cpus_held();
 
 	/*
-	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
 	 * later calls must wait for the first one to _finish_ the
 	 * jump_label_update() process. At the same time, however,
 	 * the jump_label_update() call below wants to see
@@ -159,22 +162,24 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 	if (static_key_fast_inc_not_disabled(key))
 		return true;
 
-	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0) {
-		atomic_set(&key->enabled, -1);
+	guard(mutex)(&jump_label_mutex);
+	/* Try to mark it as 'enabling in progress. */
+	if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
 		jump_label_update(key);
 		/*
-		 * Ensure that if the above cmpxchg loop observes our positive
-		 * value, it must also observe all the text changes.
+		 * Ensure that when static_key_fast_inc_not_disabled() or
+		 * static_key_slow_try_dec() observe the positive value,
+		 * they must also observe all the text changes.
 		 */
 		atomic_set_release(&key->enabled, 1);
 	} else {
-		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
-			jump_label_unlock();
+		/*
+		 * While holding the mutex this should never observe
+		 * anything else than a value >= 1 and succeed
+		 */
+		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
 			return false;
-		}
 	}
-	jump_label_unlock();
 	return true;
 }
 
@@ -247,20 +252,32 @@ EXPORT_SYMBOL_GPL(static_key_disable);
 
 static bool static_key_slow_try_dec(struct static_key *key)
 {
-	int val;
-
-	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
-	if (val == 1)
-		return false;
+	int v;
 
 	/*
-	 * The negative count check is valid even when a negative
-	 * key->enabled is in use by static_key_slow_inc(); a
-	 * __static_key_slow_dec() before the first static_key_slow_inc()
-	 * returns is unbalanced, because all other static_key_slow_inc()
-	 * instances block while the update is in progress.
+	 * Go into the slow path if key::enabled is less than or equal than
+	 * one. One is valid to shut down the key, anything less than one
+	 * is an imbalance, which is handled at the call site.
+	 *
+	 * That includes the special case of '-1' which is set in
+	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
+	 * fully serialized in the slow path below. By the time this task
+	 * acquires the jump label lock the value is back to one and the
+	 * retry under the lock must succeed.
 	 */
-	WARN(val < 0, "jump label: negative count!\n");
+	v = atomic_read(&key->enabled);
+	do {
+		/*
+		 * Warn about the '-1' case though; since that means a
+		 * decrement is concurrent with a first (0->1) increment. IOW
+		 * people are trying to disable something that wasn't yet fully
+		 * enabled. This suggests an ordering problem on the user side.
+		 */
+		WARN_ON_ONCE(v < 0);
+		if (v <= 1)
+			return false;
+	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
+
 	return true;
 }
 
@@ -271,10 +288,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 	if (static_key_slow_try_dec(key))
 		return;
 
-	jump_label_lock();
-	if (atomic_dec_and_test(&key->enabled))
+	guard(mutex)(&jump_label_mutex);
+	if (atomic_cmpxchg(&key->enabled, 1, 0))
 		jump_label_update(key);
-	jump_label_unlock();
+	else
+		WARN_ON_ONCE(!static_key_slow_try_dec(key));
 }
 
 static void __static_key_slow_dec(struct static_key *key)
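
The reworked fast paths replace the open-coded checks with atomic_try_cmpxchg() retry loops: increment only while the count is positive and below INT_MAX, decrement only while it is above one, and fall back to the mutex-serialized slow path otherwise. A user-space sketch of the increment rule using C11 atomics follows; it is a rough analogue for illustration, not the kernel helper itself.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Rough analogue of static_key_fast_inc_not_disabled(): refuse when the
 * count is <= 0 (disabled, or an enable is in progress) or already at
 * INT_MAX (saturated); otherwise increment with a compare-exchange loop.
 */
static bool fast_inc_not_disabled(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	do {
		if (v <= 0 || v == INT_MAX)
			return false;
		/* on failure, v is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(enabled, &v, v + 1));

	return true;
}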

kernel/locking/rwsem.c

Lines changed: 3 additions & 3 deletions

@@ -1297,7 +1297,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline int __down_write_common(struct rw_semaphore *sem, int state)
+static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
 	int ret = 0;
 
@@ -1310,12 +1310,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state)
 	return ret;
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static __always_inline void __down_write(struct rw_semaphore *sem)
 {
 	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
 }
 
-static inline int __down_write_killable(struct rw_semaphore *sem)
+static __always_inline int __down_write_killable(struct rw_semaphore *sem)
 {
 	return __down_write_common(sem, TASK_KILLABLE);
 }