Skip to content

Commit c19e5d3

Browse files
Frederic Weisbecker authored and fbq committed
rcu/exp: Move expedited kthread worker creation functions above rcutree_prepare_cpu()
The expedited kthread worker performing the per node initialization is going to be split into per node kthreads. As such, the future per node kthread creation will need to be called from CPU hotplug callbacks instead of an initcall, right beside the per node boost kthread creation. To prepare for that, move the kthread worker creation above rcutree_prepare_cpu() as a first step to make the review smoother for the upcoming modifications. No intended functional change. Signed-off-by: Frederic Weisbecker <frederic@kernel.org> Reviewed-by: Paul E. McKenney <paulmck@kernel.org> Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
1 parent 7836b27 commit c19e5d3

File tree

1 file changed

+48
-48
lines changed

1 file changed

+48
-48
lines changed

kernel/rcu/tree.c

Lines changed: 48 additions & 48 deletions
Original file line number | Diff line number | Diff line change
@@ -4394,6 +4394,54 @@ rcu_boot_init_percpu_data(int cpu)
43944394
rcu_boot_init_nocb_percpu_data(rdp);
43954395
}
43964396

4397+
/*
 * Expedited grace-period kthread-worker machinery.  Moved here, above
 * rcutree_prepare_cpu(), so that upcoming per-node kthread creation can
 * later be invoked from CPU-hotplug callbacks instead of an initcall.
 * Per the commit message, no functional change is intended by the move.
 */
#ifdef CONFIG_RCU_EXP_KTHREAD
4398+
/* Worker that drives expedited grace periods. */
struct kthread_worker *rcu_exp_gp_kworker;
4399+
/* Worker that performs the per-node expedited-GP initialization. */
struct kthread_worker *rcu_exp_par_gp_kworker;
4400+
4401+
/*
 * Create both expedited-GP kthread workers and raise them to SCHED_FIFO.
 * On failure, the corresponding global pointer is left NULL; failure to
 * create the second worker also destroys the first, so after return
 * either both workers exist or neither does.
 */
static void __init rcu_start_exp_gp_kworkers(void)
4402+
{
4403+
const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4404+
const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4405+
/* NOTE(review): kthread_prio is defined elsewhere in tree.c (presumably
 * from the rcutree.kthread_prio boot parameter) -- not visible in this
 * hunk; confirm at its definition. */
struct sched_param param = { .sched_priority = kthread_prio };
4406+
4407+
rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4408+
/* kthread_create_worker() reports failure via ERR_PTR(), hence
 * IS_ERR_OR_NULL() rather than a plain NULL test. */
if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4409+
pr_err("Failed to create %s!\n", gp_kworker_name);
4410+
rcu_exp_gp_kworker = NULL;
4411+
return;
4412+
}
4413+
4414+
rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4415+
if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4416+
pr_err("Failed to create %s!\n", par_gp_kworker_name);
4417+
rcu_exp_par_gp_kworker = NULL;
4418+
/* Roll back the first worker so the pair stays all-or-nothing. */
kthread_destroy_worker(rcu_exp_gp_kworker);
4419+
rcu_exp_gp_kworker = NULL;
4420+
return;
4421+
}
4422+
4423+
/* Return values intentionally ignored: the priority boost is
 * best-effort (the _nocheck variant skips permission checks). */
sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4424+
sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4425+
&param);
4426+
}
4427+
4428+
/* No workqueue is needed when kthread workers handle expedited GPs. */
static inline void rcu_alloc_par_gp_wq(void)
4429+
{
4430+
}
4431+
#else /* !CONFIG_RCU_EXP_KTHREAD */
4432+
/* Workqueue used for per-node expedited-GP work in this configuration. */
struct workqueue_struct *rcu_par_gp_wq;
4433+
4434+
/* Nothing to start: the workqueue path replaces the kthread workers. */
static void __init rcu_start_exp_gp_kworkers(void)
4435+
{
4436+
}
4437+
4438+
/* Allocate the per-node expedited-GP workqueue.  NOTE(review):
 * WQ_MEM_RECLAIM suggests a forward-progress guarantee under memory
 * pressure is required -- rationale not visible in this hunk. */
static inline void rcu_alloc_par_gp_wq(void)
4439+
{
4440+
rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4441+
WARN_ON(!rcu_par_gp_wq);
4442+
}
4443+
#endif /* CONFIG_RCU_EXP_KTHREAD */
4444+
43974445
/*
43984446
* Invoked early in the CPU-online process, when pretty much all services
43994447
* are available. The incoming CPU is not present.
@@ -4730,54 +4778,6 @@ static int rcu_pm_notify(struct notifier_block *self,
47304778
return NOTIFY_OK;
47314779
}
47324780

4733-
/*
 * Original location of the expedited kthread-worker creation section,
 * removed by this commit.  The code is byte-identical to what the same
 * commit re-adds earlier in the file, above rcutree_prepare_cpu();
 * this hunk is a pure move with no functional change.
 */
#ifdef CONFIG_RCU_EXP_KTHREAD
4734-
/* Worker that drives expedited grace periods. */
struct kthread_worker *rcu_exp_gp_kworker;
4735-
/* Worker that performs the per-node expedited-GP initialization. */
struct kthread_worker *rcu_exp_par_gp_kworker;
4736-
4737-
/* Create both workers (SCHED_FIFO); all-or-nothing on failure. */
static void __init rcu_start_exp_gp_kworkers(void)
4738-
{
4739-
const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4740-
const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4741-
struct sched_param param = { .sched_priority = kthread_prio };
4742-
4743-
rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4744-
/* ERR_PTR()-style failure reporting, hence IS_ERR_OR_NULL(). */
if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4745-
pr_err("Failed to create %s!\n", gp_kworker_name);
4746-
rcu_exp_gp_kworker = NULL;
4747-
return;
4748-
}
4749-
4750-
rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4751-
if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4752-
pr_err("Failed to create %s!\n", par_gp_kworker_name);
4753-
rcu_exp_par_gp_kworker = NULL;
4754-
/* Roll back the first worker on partial failure. */
kthread_destroy_worker(rcu_exp_gp_kworker);
4755-
rcu_exp_gp_kworker = NULL;
4756-
return;
4757-
}
4758-
4759-
/* Best-effort priority boost; return values ignored on purpose. */
sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4760-
sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4761-
&param);
4762-
}
4763-
4764-
/* No workqueue needed when kthread workers are in use. */
static inline void rcu_alloc_par_gp_wq(void)
4765-
{
4766-
}
4767-
#else /* !CONFIG_RCU_EXP_KTHREAD */
4768-
/* Workqueue used for per-node expedited-GP work in this configuration. */
struct workqueue_struct *rcu_par_gp_wq;
4769-
4770-
/* Nothing to start on the workqueue path. */
static void __init rcu_start_exp_gp_kworkers(void)
4771-
{
4772-
}
4773-
4774-
static inline void rcu_alloc_par_gp_wq(void)
4775-
{
4776-
rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4777-
WARN_ON(!rcu_par_gp_wq);
4778-
}
4779-
#endif /* CONFIG_RCU_EXP_KTHREAD */
4780-
47814781
/*
47824782
* Spawn the kthreads that handle RCU's grace periods.
47834783
*/

0 commit comments

Comments
 (0)