Skip to content

Commit dc1f789

Browse files
author
Peter Zijlstra
committed
locking/mutex: Make contention tracepoints more consistent wrt adaptive spinning
Have the trace_contention_*() tracepoints consistently include adaptive spinning. In order to differentiate between the spinning and non-spinning states add LCB_F_MUTEX and combine with LCB_F_SPIN. The consequence is that a mutex contention can now trigger multiple _begin() tracepoints before triggering an _end(). Additionally, this fixes one path where mutex would trigger _end() without ever seeing a _begin(). Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
1 parent ee042be commit dc1f789

File tree

2 files changed

+15
-5
lines changed

2 files changed

+15
-5
lines changed

include/trace/events/lock.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#define LCB_F_WRITE (1U << 2)
1515
#define LCB_F_RT (1U << 3)
1616
#define LCB_F_PERCPU (1U << 4)
17+
#define LCB_F_MUTEX (1U << 5)
1718

1819

1920
#ifdef CONFIG_LOCKDEP
@@ -113,7 +114,8 @@ TRACE_EVENT(contention_begin,
113114
{ LCB_F_READ, "READ" },
114115
{ LCB_F_WRITE, "WRITE" },
115116
{ LCB_F_RT, "RT" },
116-
{ LCB_F_PERCPU, "PERCPU" }
117+
{ LCB_F_PERCPU, "PERCPU" },
118+
{ LCB_F_MUTEX, "MUTEX" }
117119
))
118120
);
119121

kernel/locking/mutex.c

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -602,12 +602,14 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
602602
preempt_disable();
603603
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
604604

605+
trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
605606
if (__mutex_trylock(lock) ||
606607
mutex_optimistic_spin(lock, ww_ctx, NULL)) {
607608
/* got the lock, yay! */
608609
lock_acquired(&lock->dep_map, ip);
609610
if (ww_ctx)
610611
ww_mutex_set_context_fastpath(ww, ww_ctx);
612+
trace_contention_end(lock, 0);
611613
preempt_enable();
612614
return 0;
613615
}
@@ -644,7 +646,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
644646
}
645647

646648
set_current_state(state);
647-
trace_contention_begin(lock, 0);
649+
trace_contention_begin(lock, LCB_F_MUTEX);
648650
for (;;) {
649651
bool first;
650652

@@ -684,10 +686,16 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
684686
* state back to RUNNING and fall through the next schedule(),
685687
* or we must see its unlock and acquire.
686688
*/
687-
if (__mutex_trylock_or_handoff(lock, first) ||
688-
(first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
689+
if (__mutex_trylock_or_handoff(lock, first))
689690
break;
690691

692+
if (first) {
693+
trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
694+
if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
695+
break;
696+
trace_contention_begin(lock, LCB_F_MUTEX);
697+
}
698+
691699
raw_spin_lock(&lock->wait_lock);
692700
}
693701
raw_spin_lock(&lock->wait_lock);
@@ -723,8 +731,8 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
723731
err:
724732
__set_current_state(TASK_RUNNING);
725733
__mutex_remove_waiter(lock, &waiter);
726-
trace_contention_end(lock, ret);
727734
err_early_kill:
735+
trace_contention_end(lock, ret);
728736
raw_spin_unlock(&lock->wait_lock);
729737
debug_mutex_free_waiter(&waiter);
730738
mutex_release(&lock->dep_map, ip);

0 commit comments

Comments
 (0)