
Commit 7836b27

Frederic Weisbecker authored and fbq committed
rcu: s/boost_kthread_mutex/kthread_mutex
This mutex currently protects per-node boost kthread creation and affinity setting across CPU-hotplug operations. Since the expedited kworkers will soon be split per node as well, they will be subject to the same concurrency constraints against hotplug. Their creation and affinity-tuning operations will therefore be grouped with those of the boost kthreads and rely on the same mutex. To prepare for that, generalize its name.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
1 parent e7539ff commit 7836b27
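
For readers who want the pattern in isolation: the commit keeps kthread creation and affinity tuning under one per-node mutex so neither can race with CPU hotplug. Below is a minimal user-space sketch of that idea, assuming POSIX threads; it is an editor's illustration, not kernel code, and every struct and function name in it is a hypothetical stand-in.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

struct node {
	pthread_mutex_t kthread_mutex;	/* the analogue of rnp->kthread_mutex */
	pthread_t boost_thread;
	int boost_thread_valid;
};

static void *boost_fn(void *arg)
{
	(void)arg;
	sleep(1);			/* stand-in for the real boost loop */
	return NULL;
}

/* Thread creation runs under ->kthread_mutex, mirroring rcu_spawn_one_boost_kthread(). */
static void spawn_node_thread(struct node *n)
{
	pthread_mutex_lock(&n->kthread_mutex);
	if (!n->boost_thread_valid &&
	    pthread_create(&n->boost_thread, NULL, boost_fn, NULL) == 0)
		n->boost_thread_valid = 1;
	pthread_mutex_unlock(&n->kthread_mutex);
}

/* Affinity tuning takes the same mutex, so it cannot race with creation. */
static void set_node_thread_affinity(struct node *n, int cpu)
{
	cpu_set_t cm;

	CPU_ZERO(&cm);
	CPU_SET(cpu, &cm);
	pthread_mutex_lock(&n->kthread_mutex);
	if (n->boost_thread_valid)
		pthread_setaffinity_np(n->boost_thread, sizeof(cm), &cm);
	pthread_mutex_unlock(&n->kthread_mutex);
}

int main(void)
{
	struct node n = { .kthread_mutex = PTHREAD_MUTEX_INITIALIZER };

	spawn_node_thread(&n);			/* e.g. from a CPU-online callback */
	set_node_thread_affinity(&n, 0);	/* e.g. from a CPU-offline callback */
	if (n.boost_thread_valid)
		pthread_join(n.boost_thread, NULL);
	printf("done\n");
	return 0;
}

Once the expedited kworkers become per node, their spawn and affinity paths would simply take the same per-node mutex, which is exactly why the field loses its boost_ prefix.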

File tree

3 files changed: +7 −7 lines changed

kernel/rcu/tree.c

Lines changed: 1 addition & 1 deletion

@@ -4918,7 +4918,7 @@ static void __init rcu_init_one(void)
 		init_waitqueue_head(&rnp->exp_wq[2]);
 		init_waitqueue_head(&rnp->exp_wq[3]);
 		spin_lock_init(&rnp->exp_lock);
-		mutex_init(&rnp->boost_kthread_mutex);
+		mutex_init(&rnp->kthread_mutex);
 		raw_spin_lock_init(&rnp->exp_poll_lock);
 		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
 		INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);

kernel/rcu/tree.h

Lines changed: 1 addition & 1 deletion

@@ -113,7 +113,7 @@ struct rcu_node {
 				/* side effect, not as a lock. */
 	unsigned long boost_time;
 				/* When to start boosting (jiffies). */
-	struct mutex boost_kthread_mutex;
+	struct mutex kthread_mutex;
 				/* Exclusion for thread spawning and affinity */
 				/* manipulation. */
 	struct task_struct *boost_kthread_task;

kernel/rcu/tree_plugin.h

Lines changed: 5 additions & 5 deletions

@@ -1195,7 +1195,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 	struct sched_param sp;
 	struct task_struct *t;
 
-	mutex_lock(&rnp->boost_kthread_mutex);
+	mutex_lock(&rnp->kthread_mutex);
 	if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
 		goto out;
 
@@ -1212,7 +1212,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
 
 out:
-	mutex_unlock(&rnp->boost_kthread_mutex);
+	mutex_unlock(&rnp->kthread_mutex);
 }
 
 /*
@@ -1224,7 +1224,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
  * no outgoing CPU. If there are no CPUs left in the affinity set,
  * this function allows the kthread to execute on any CPU.
  *
- * Any future concurrent calls are serialized via ->boost_kthread_mutex.
+ * Any future concurrent calls are serialized via ->kthread_mutex.
  */
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
@@ -1237,7 +1237,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 		return;
 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
-	mutex_lock(&rnp->boost_kthread_mutex);
+	mutex_lock(&rnp->kthread_mutex);
 	mask = rcu_rnp_online_cpus(rnp);
 	for_each_leaf_node_possible_cpu(rnp, cpu)
 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
@@ -1250,7 +1250,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 			cpumask_clear_cpu(outgoingcpu, cm);
 	}
 	set_cpus_allowed_ptr(t, cm);
-	mutex_unlock(&rnp->boost_kthread_mutex);
+	mutex_unlock(&rnp->kthread_mutex);
 	free_cpumask_var(cm);
 }
