Commit 95d248d
Merge tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:

 - Prevent the leaking of a debug timer in futex_waitv()

 - A preempt-RT mutex locking fix, adding the proper acquire semantics

* tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Fix futex_waitv() hrtimer debug object leak on kcalloc error
  rtmutex: Add acquire semantics for rtmutex lock acquisition slow path

2 parents: 8b41948 + 94cd8fa

3 files changed, 56 insertions(+), 16 deletions(-)

kernel/futex/syscalls.c (7 additions, 4 deletions)

@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
 	}
 
 	futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
-	if (!futexv)
-		return -ENOMEM;
+	if (!futexv) {
+		ret = -ENOMEM;
+		goto destroy_timer;
+	}
 
 	ret = futex_parse_waitv(futexv, waiters, nr_futexes);
 	if (!ret)
 		ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
 
+	kfree(futexv);
+
+destroy_timer:
 	if (timeout) {
 		hrtimer_cancel(&to.timer);
 		destroy_hrtimer_on_stack(&to.timer);
 	}
-
-	kfree(futexv);
 	return ret;
 }
 
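Why the goto restructuring: in futex_waitv() the hrtimer sleeper is set up before the kcalloc(), so the old early "return -ENOMEM" skipped the timer teardown and leaked its debug object. Below is a minimal userspace sketch of the same unwinding idiom, assuming hypothetical setup_timer()/destroy_timer() helpers in place of the hrtimer calls; it models the fix, it is not the kernel code.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical stand-ins for the hrtimer setup/teardown calls: they model
 * any resource acquired before the allocation that must be released on
 * every exit path, including the allocation-failure path.
 */
static void setup_timer(void)   { puts("timer armed"); }
static void destroy_timer(void) { puts("timer destroyed"); }

static int waitv_like(size_t nr, int have_timeout)
{
	int *futexv;
	int ret = 0;

	if (have_timeout)
		setup_timer();

	futexv = calloc(nr, sizeof(*futexv));
	if (!futexv) {
		/*
		 * Returning directly here is the original bug: it skips
		 * the teardown below and leaks the timer.
		 */
		ret = -1;
		goto destroy;
	}

	/* ... parse waiters and wait ... */

	free(futexv);
destroy:
	if (have_timeout)
		destroy_timer();
	return ret;
}

int main(void)
{
	return waitv_like(4, 1) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The design point matches the patch: every exit taken after the resource is set up flows through the teardown label, so the allocation-failure branch jumps forward instead of returning.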

kernel/locking/rtmutex.c (46 additions, 9 deletions)

@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
 	 * set this bit before looking at the lock.
 	 */
 
-static __always_inline void
-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+static __always_inline struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
 {
 	unsigned long val = (unsigned long)owner;
 
 	if (rt_mutex_has_waiters(lock))
 		val |= RT_MUTEX_HAS_WAITERS;
 
-	WRITE_ONCE(lock->owner, (struct task_struct *)val);
+	return (struct task_struct *)val;
+}
+
+static __always_inline void
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+{
+	/*
+	 * lock->wait_lock is held but explicit acquire semantics are needed
+	 * for a new lock owner so WRITE_ONCE is insufficient.
+	 */
+	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+{
+	/* lock->wait_lock is held so the unlock provides release semantics. */
+	WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
 }
 
 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)

@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
 		((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+static __always_inline void
+fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
 {
 	unsigned long owner, *p = (unsigned long *) &lock->owner;
 

@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
 	 * still set.
 	 */
 	owner = READ_ONCE(*p);
-	if (owner & RT_MUTEX_HAS_WAITERS)
-		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	if (owner & RT_MUTEX_HAS_WAITERS) {
+		/*
+		 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+		 * why xchg_acquire() is used for updating owner for
+		 * locking and WRITE_ONCE() for unlocking.
+		 *
+		 * WRITE_ONCE() would work for the acquire case too, but
+		 * in case that the lock acquisition failed it might
+		 * force other lockers into the slow path unnecessarily.
+		 */
+		if (acquire_lock)
+			xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+		else
+			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	}
 }
 
 /*

@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 		owner = *p;
 	} while (cmpxchg_relaxed(p, owner,
 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+	/*
+	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+	 * operations in the event of contention. Ensure the successful
+	 * cmpxchg is visible.
+	 */
+	smp_mb__after_atomic();
 }
 
 /*

@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the lock waiters bit
 	 * unconditionally. Clean this up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	return ret;
 }

@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit
 	 * unconditionally. We might have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	trace_contention_end(lock, ret);
 

@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
 	 * We might have to fix that up:
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	debug_rt_mutex_free_waiter(&waiter);
 
 	trace_contention_end(lock, 0);
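The memory-ordering argument in the comments above can be restated compactly with C11 atomics. This is a sketch under simplifying assumptions, with the waiters flag in the low bit of a bare owner word; the hypothetical set_owner_locked() and mark_waiters() below model xchg_acquire(), cmpxchg_relaxed() and smp_mb__after_atomic(), and do not reproduce the real rt_mutex_base layout or helpers.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL	/* models RT_MUTEX_HAS_WAITERS in the low bit */

static _Atomic uintptr_t owner;	/* hypothetical owner word */

/*
 * Slow-path acquisition: installing the new owner must be an ACQUIRE
 * operation so that the owner's later accesses to the protected data
 * cannot be reordered before it. The kernel's xchg_acquire() corresponds
 * to an acquire-ordered atomic exchange here.
 */
static void set_owner_locked(uintptr_t task, int has_waiters)
{
	uintptr_t val = task | (has_waiters ? HAS_WAITERS : 0);

	atomic_exchange_explicit(&owner, val, memory_order_acquire);
}

/*
 * Setting the waiters bit can stay relaxed inside the retry loop; a single
 * full barrier after the successful update publishes it, which is cheaper
 * under contention than an acquire-ordered cmpxchg on every iteration.
 * This mirrors cmpxchg_relaxed() + smp_mb__after_atomic() in the patch.
 */
static void mark_waiters(void)
{
	uintptr_t old = atomic_load_explicit(&owner, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(&owner, &old,
						      old | HAS_WAITERS,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* 'old' is reloaded on failure */
	atomic_thread_fence(memory_order_seq_cst);
}

int main(void)
{
	uintptr_t task = 0x1000;	/* stand-in for a task_struct pointer */

	mark_waiters();
	set_owner_locked(task, 1);
	printf("owner word: %#lx\n", (unsigned long)atomic_load(&owner));
	return 0;
}

The split mirrors the patch: becoming the owner is the acquire operation, so the new owner's critical section cannot leak upward, while the waiters-bit loop stays relaxed and pays for ordering once, after it succeeds.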

kernel/locking/rtmutex_api.c (3 additions, 3 deletions)

@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
 void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
 {
 	debug_rt_mutex_proxy_unlock(lock);
-	rt_mutex_set_owner(lock, NULL);
+	rt_mutex_clear_owner(lock);
 }
 
 /**

@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;

@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, false);
 
 	raw_spin_unlock_irq(&lock->wait_lock);
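Note the asymmetry across the fixup_rt_mutex_waiters() call sites: every path that can end with the caller holding the lock now passes true and gets the acquire-ordered update, while rt_mutex_cleanup_proxy_lock(), which only runs to clean up after an acquisition that did not succeed, passes false and keeps the plain WRITE_ONCE(). Similarly, rt_mutex_proxy_unlock() switches from rt_mutex_set_owner(lock, NULL) to the new rt_mutex_clear_owner(), since an unlock needs release, not acquire, semantics.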
