@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
  * set this bit before looking at the lock.
  */
 
-static __always_inline void
-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+static __always_inline struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
 {
 	unsigned long val = (unsigned long)owner;
 
 	if (rt_mutex_has_waiters(lock))
 		val |= RT_MUTEX_HAS_WAITERS;
 
-	WRITE_ONCE(lock->owner, (struct task_struct *)val);
+	return (struct task_struct *)val;
+}
+
+static __always_inline void
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+{
+	/*
+	 * lock->wait_lock is held but explicit acquire semantics are needed
+	 * for a new lock owner so WRITE_ONCE is insufficient.
+	 */
+	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+{
+	/* lock->wait_lock is held so the unlock provides release semantics. */
+	WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
 }
 
 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+static __always_inline void
+fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
 {
 	unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
 	 * still set.
 	 */
 	owner = READ_ONCE(*p);
-	if (owner & RT_MUTEX_HAS_WAITERS)
-		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	if (owner & RT_MUTEX_HAS_WAITERS) {
+		/*
+		 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+		 * why xchg_acquire() is used for updating owner for
+		 * locking and WRITE_ONCE() for unlocking.
+		 *
+		 * WRITE_ONCE() would work for the acquire case too, but
+		 * in case that the lock acquisition failed it might
+		 * force other lockers into the slow path unnecessarily.
+		 */
+		if (acquire_lock)
+			xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+		else
+			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	}
 }
 
 /*
@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 		owner = *p;
 	} while (cmpxchg_relaxed(p, owner,
 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+	/*
+	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+	 * operations in the event of contention. Ensure the successful
+	 * cmpxchg is visible.
+	 */
+	smp_mb__after_atomic();
 }
 
 /*
@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the lock waiters bit
 	 * unconditionally. Clean this up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	return ret;
 }
@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit
 	 * unconditionally. We might have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	trace_contention_end(lock, ret);
 
@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
 	 * We might have to fix that up:
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	debug_rt_mutex_free_waiter(&waiter);
 
 	trace_contention_end(lock, 0);
0 commit comments