
Commit 8838a1a

Merge tag 'locking-core-2025-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 "Lockdep:

   - Improve and fix lockdep bitsize limits, clarify the Kconfig
     documentation (Carlos Llamas)

   - Fix lockdep build warning on Clang related to
     chain_hlock_class_idx() inlining (Andy Shevchenko)

   - Relax the requirements of PROVE_RAW_LOCK_NESTING arch support by
     not tying it to ARCH_SUPPORTS_RT unnecessarily (Waiman Long)

  Rust integration:

   - Support lock pointers managed by the C side (Lyude Paul)

   - Support guard types (Lyude Paul)

   - Update MAINTAINERS file filters to include the Rust locking code
     (Boqun Feng)

  Wake-queues:

   - Add raw_spin_*wake() helpers to simplify locking code (John Stultz)

  SMP cross-calls:

   - Fix potential data update race by evaluating the local cond_func()
     before IPI side-effects (Mathieu Desnoyers)

  Guard primitives:

   - Ease [c]tags based searches by including the cleanup/guard type
     primitives (Peter Zijlstra)

  ww_mutexes:

   - Simplify the ww_mutex self-test code via swap() (Thorsten Blum)

  Static calls:

   - Update the static calls MAINTAINERS file-pattern (Jiri Slaby)"

* tag 'locking-core-2025-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  MAINTAINERS: Add static_call_inline.c to STATIC BRANCH/CALL
  cleanup, tags: Create tags for the cleanup primitives
  sched/wake_q: Add helper to call wake_up_q after unlock with preemption disabled
  rust: sync: Add lock::Backend::assert_is_held()
  rust: sync: Add SpinLockGuard type alias
  rust: sync: Add MutexGuard type alias
  rust: sync: Make Guard::new() public
  rust: sync: Add Lock::from_raw() for Lock<(), B>
  locking: MAINTAINERS: Start watching Rust locking primitives
  lockdep: Move lockdep_assert_locked() under #ifdef CONFIG_PROVE_LOCKING
  lockdep: Mark chain_hlock_class_idx() with __maybe_unused
  lockdep: Document MAX_LOCKDEP_CHAIN_HLOCKS calculation
  lockdep: Clarify size for LOCKDEP_*_BITS configs
  lockdep: Fix upper limit for LOCKDEP_*_BITS configs
  locking/ww_mutex/test: Use swap() macro
  smp/scf: Evaluate local cond_func() before IPI side-effects
  locking/lockdep: Enforce PROVE_RAW_LOCK_NESTING only if ARCH_SUPPORTS_RT
2 parents b9d8a29 + cb4ccc7 commit 8838a1a

17 files changed: +152 additions, -69 deletions


MAINTAINERS

Lines changed: 7 additions & 2 deletions
@@ -13439,8 +13439,8 @@ LOCKING PRIMITIVES
 M:	Peter Zijlstra <peterz@infradead.org>
 M:	Ingo Molnar <mingo@redhat.com>
 M:	Will Deacon <will@kernel.org>
+M:	Boqun Feng <boqun.feng@gmail.com> (LOCKDEP & RUST)
 R:	Waiman Long <longman@redhat.com>
-R:	Boqun Feng <boqun.feng@gmail.com> (LOCKDEP)
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
@@ -13454,6 +13454,11 @@ F: include/linux/seqlock.h
 F:	include/linux/spinlock*.h
 F:	kernel/locking/
 F:	lib/locking*.[ch]
+F:	rust/helpers/mutex.c
+F:	rust/helpers/spinlock.c
+F:	rust/kernel/sync/lock.rs
+F:	rust/kernel/sync/lock/
+F:	rust/kernel/sync/locked_by.rs
 X:	kernel/locking/locktorture.c

 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
@@ -22467,7 +22472,7 @@ F: arch/*/kernel/static_call.c
 F:	include/linux/jump_label*.h
 F:	include/linux/static_call*.h
 F:	kernel/jump_label.c
-F:	kernel/static_call.c
+F:	kernel/static_call*.c

 STI AUDIO (ASoC) DRIVERS
 M:	Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>

include/linux/sched/wake_q.h

Lines changed: 34 additions & 0 deletions
@@ -63,4 +63,38 @@ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);

+/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
+static inline
+void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock(lock);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
+static inline
+void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock_irq(lock);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
+static inline
+void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
+				     struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock_irqrestore(lock, flags);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
 #endif /* _LINUX_SCHED_WAKE_Q_H */
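
The three helpers collapse the open-coded "disable preemption, drop the lock, flush the wake queue, re-enable preemption" sequence used by the callers converted below into a single call. A minimal caller sketch follows; the object and field names (struct my_waitable, obj->lock, obj->waiter) and my_release() are hypothetical, only the raw_spinlock and wake_q APIs come from the header above.

/*
 * Sketch of a caller: queue a wakeup while holding a raw spinlock, then
 * drop the lock and issue the wakeup with preemption still disabled.
 */
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

struct my_waitable {			/* hypothetical example object */
	raw_spinlock_t lock;
	struct task_struct *waiter;
};

static void my_release(struct my_waitable *obj)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	raw_spin_lock_irqsave(&obj->lock, flags);
	if (obj->waiter) {
		wake_q_add(&wake_q, obj->waiter);	/* queue wakeup under the lock */
		obj->waiter = NULL;
	}
	/*
	 * Previously spelled out as:
	 *   preempt_disable();
	 *   raw_spin_unlock_irqrestore(&obj->lock, flags);
	 *   wake_up_q(&wake_q);
	 *   preempt_enable();
	 * The helper keeps preemption disabled across the unlock and the
	 * wakeups, and re-initializes the queue for reuse.
	 */
	raw_spin_unlock_irqrestore_wake(&obj->lock, flags, &wake_q);
}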

kernel/futex/pi.c

Lines changed: 1 addition & 4 deletions
@@ -1020,10 +1020,7 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
 	 * it sees the futex_q::pi_state.
 	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);

 	if (ret) {
 		if (ret == 1)

kernel/locking/lockdep.c

Lines changed: 3 additions & 1 deletion
@@ -157,10 +157,12 @@ static inline void lockdep_unlock(void)
 	__this_cpu_dec(lockdep_recursion);
 }

+#ifdef CONFIG_PROVE_LOCKING
 static inline bool lockdep_assert_locked(void)
 {
 	return DEBUG_LOCKS_WARN_ON(__owner != current);
 }
+#endif

 static struct task_struct *lockdep_selftest_task_struct;

@@ -430,7 +432,7 @@ static inline u16 hlock_id(struct held_lock *hlock)
 	return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
 }

-static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
+static inline __maybe_unused unsigned int chain_hlock_class_idx(u16 hlock_id)
 {
 	return hlock_id & (MAX_LOCKDEP_KEYS - 1);
 }
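
For context on the __maybe_unused change: in some configurations chain_hlock_class_idx() loses its only callers, and Clang's -Wunused-function can then complain even though the function is static inline. A small hedged illustration of the same pattern; the names (decode_idx, CONFIG_MY_FEATURE, my_feature_lookup) are hypothetical, only the attribute itself is the kernel's.

/*
 * Illustrative only: __maybe_unused keeps a static (inline) helper in the
 * source without triggering an unused-function warning when its only
 * caller is compiled out.
 */
#include <linux/compiler.h>

static inline __maybe_unused unsigned int decode_idx(unsigned int id)
{
	return id & 0xffff;		/* keep the low bits, mirroring chain_hlock_class_idx() */
}

#ifdef CONFIG_MY_FEATURE		/* hypothetical config option */
unsigned int my_feature_lookup(unsigned int id)
{
	return decode_idx(id);		/* the only caller; may be compiled out */
}
#endif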

kernel/locking/lockdep_internals.h

Lines changed: 2 additions & 1 deletion
@@ -119,7 +119,8 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =

 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

-#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
+#define AVG_LOCKDEP_CHAIN_DEPTH		5
+#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS * AVG_LOCKDEP_CHAIN_DEPTH)

 extern struct lock_chain lock_chains[];
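
A worked expansion of the new macros, assuming the Kconfig default LOCKDEP_CHAINS_BITS=16 (see the lib/Kconfig.debug hunk further down); this is a sketch of the arithmetic only:

/*
 *   MAX_LOCKDEP_CHAINS       = 1UL << 16 = 65536
 *   MAX_LOCKDEP_CHAIN_HLOCKS = 65536 * AVG_LOCKDEP_CHAIN_DEPTH
 *                            = 65536 * 5 = 327680
 *
 * i.e. the held-lock chain storage is sized for an average of five
 * held locks per recorded chain.
 */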

kernel/locking/mutex.c

Lines changed: 4 additions & 12 deletions
@@ -657,10 +657,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 			goto err;
 	}

-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	/* Make sure we do wakeups before calling schedule */
-	wake_up_q(&wake_q);
-	wake_q_init(&wake_q);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);

 	schedule_preempt_disabled();

@@ -710,8 +707,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);

-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	preempt_enable();
 	return 0;

@@ -720,10 +716,9 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
 	trace_contention_end(lock, ret);
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, ip);
-	wake_up_q(&wake_q);
 	preempt_enable();
 	return ret;
 }
@@ -935,10 +930,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);

-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }

 #ifndef CONFIG_DEBUG_LOCK_ALLOC

kernel/locking/rtmutex.c

Lines changed: 5 additions & 27 deletions
@@ -1292,13 +1292,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 	 */
 	get_task_struct(owner);

-	preempt_disable();
-	raw_spin_unlock_irq(&lock->wait_lock);
-	/* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
-	wake_up_q(wake_q);
-	wake_q_init(wake_q);
-	preempt_enable();
-
+	raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);

 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
@@ -1642,13 +1636,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
-		preempt_disable();
-		raw_spin_unlock_irq(&lock->wait_lock);
-		if (wake_q) {
-			wake_up_q(wake_q);
-			wake_q_init(wake_q);
-		}
-		preempt_enable();
+		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);

 		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
 			rt_mutex_schedule();
@@ -1799,10 +1787,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	rt_mutex_post_schedule();

 	return ret;
@@ -1860,11 +1845,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
-		preempt_disable();
-		raw_spin_unlock_irq(&lock->wait_lock);
-		wake_up_q(wake_q);
-		wake_q_init(wake_q);
-		preempt_enable();
+		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);

 		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
 			schedule_rtlock();
@@ -1893,10 +1874,7 @@ static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)

 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	rtlock_slowlock_locked(lock, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }

 #endif /* RT_MUTEX_BUILD_SPINLOCKS */

kernel/locking/test-ww_mutex.c

Lines changed: 3 additions & 6 deletions
@@ -404,7 +404,7 @@ static inline u32 prandom_u32_below(u32 ceil)
 static int *get_random_order(int count)
 {
 	int *order;
-	int n, r, tmp;
+	int n, r;

 	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
 	if (!order)
@@ -415,11 +415,8 @@ static int *get_random_order(int count)

 	for (n = count - 1; n > 1; n--) {
 		r = prandom_u32_below(n + 1);
-		if (r != n) {
-			tmp = order[n];
-			order[n] = order[r];
-			order[r] = tmp;
-		}
+		if (r != n)
+			swap(order[n], order[r]);
 	}

 	return order;
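
The swap() used above is the kernel's generic two-argument macro (from include/linux/minmax.h), which replaces the open-coded temporary variable. Reproduced from memory as a sketch; the exact spelling in a given tree may differ:

/* Roughly: exchange two lvalues of the same type via a hidden temporary. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)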

kernel/smp.c

Lines changed: 3 additions & 2 deletions
@@ -815,7 +815,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	WARN_ON_ONCE(!in_task());

 	/* Check if we need local execution. */
-	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
+	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
+	    (!cond_func || cond_func(this_cpu, info)))
 		run_local = true;

 	/* Check if we need remote execution, i.e., any CPU excluding this one. */
@@ -868,7 +869,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		send_call_function_ipi_mask(cfd->cpumask_ipi);
 	}

-	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
+	if (run_local) {
 		unsigned long flags;

 		local_irq_save(flags);
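
The change samples the local cond_func() before any IPIs go out, so a condition that reads state the remote callbacks also update cannot be skewed by remote runs that have already started. A hedged sketch of the kind of caller this protects; the flag and function names (flush_pending, need_flush, do_flush, flush_everywhere) are hypothetical, only on_each_cpu_cond() and the atomic helpers are kernel APIs.

/*
 * Sketch: the condition and the callback touch the same state, so the
 * order in which the local condition is evaluated relative to remote
 * callback execution matters.
 */
#include <linux/smp.h>
#include <linux/atomic.h>

static atomic_t flush_pending;		/* hypothetical shared state */

static bool need_flush(int cpu, void *info)
{
	/* Condition reads state that the callback below also updates. */
	return atomic_read(&flush_pending) != 0;
}

static void do_flush(void *info)
{
	/* ... perform the per-CPU work ... */
	atomic_set(&flush_pending, 0);
}

static void flush_everywhere(void)
{
	/*
	 * With the fix, the local need_flush() is sampled before any IPIs
	 * are sent, so remote do_flush() runs cannot change its outcome.
	 */
	on_each_cpu_cond(need_flush, do_flush, NULL, true);
}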

lib/Kconfig.debug

Lines changed: 11 additions & 11 deletions
@@ -1397,9 +1397,9 @@ config PROVE_LOCKING
 	  For more details, see Documentation/locking/lockdep-design.rst.

 config PROVE_RAW_LOCK_NESTING
-	bool
+	bool "Enable raw_spinlock - spinlock nesting checks" if !ARCH_SUPPORTS_RT
 	depends on PROVE_LOCKING
-	default y
+	default y if ARCH_SUPPORTS_RT
 	help
 	 Enable the raw_spinlock vs. spinlock nesting checks which ensure
 	 that the lock nesting rules for PREEMPT_RT enabled kernels are
@@ -1502,41 +1502,41 @@ config LOCKDEP_SMALL
 	bool

 config LOCKDEP_BITS
-	int "Bitsize for MAX_LOCKDEP_ENTRIES"
+	int "Size for MAX_LOCKDEP_ENTRIES (as Nth power of 2)"
 	depends on LOCKDEP && !LOCKDEP_SMALL
-	range 10 30
+	range 10 24
 	default 15
 	help
 	  Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.

 config LOCKDEP_CHAINS_BITS
-	int "Bitsize for MAX_LOCKDEP_CHAINS"
+	int "Size for MAX_LOCKDEP_CHAINS (as Nth power of 2)"
 	depends on LOCKDEP && !LOCKDEP_SMALL
 	range 10 21
 	default 16
 	help
 	  Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.

 config LOCKDEP_STACK_TRACE_BITS
-	int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+	int "Size for MAX_STACK_TRACE_ENTRIES (as Nth power of 2)"
 	depends on LOCKDEP && !LOCKDEP_SMALL
-	range 10 30
+	range 10 26
 	default 19
 	help
 	  Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.

 config LOCKDEP_STACK_TRACE_HASH_BITS
-	int "Bitsize for STACK_TRACE_HASH_SIZE"
+	int "Size for STACK_TRACE_HASH_SIZE (as Nth power of 2)"
 	depends on LOCKDEP && !LOCKDEP_SMALL
-	range 10 30
+	range 10 26
 	default 14
 	help
 	  Try increasing this value if you need large STACK_TRACE_HASH_SIZE.

 config LOCKDEP_CIRCULAR_QUEUE_BITS
-	int "Bitsize for elements in circular_queue struct"
+	int "Size for elements in circular_queue struct (as Nth power of 2)"
 	depends on LOCKDEP
-	range 10 30
+	range 10 26
 	default 12
 	help
 	  Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
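
On the "Nth power of 2" wording: each of these config values is a bit shift, not an element count. A sketch of how LOCKDEP_BITS feeds the sizing macro, based on kernel/locking/lockdep_internals.h (the exact definition may differ by tree):

/* MAX_LOCKDEP_ENTRIES is derived from the config as a power of two: */
#define MAX_LOCKDEP_ENTRIES	(1UL << CONFIG_LOCKDEP_BITS)

/* Default LOCKDEP_BITS=15 -> 1 << 15 = 32768 entries.             */
/* New upper bound of 24   -> at most 1 << 24 = 16777216 entries.  */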
