
Commit 3add00b

Merge branches 'rcu-doc.2024.02.14a', 'rcu-nocb.2024.02.14a', 'rcu-exp.2024.02.14a', 'rcu-tasks.2024.02.26a' and 'rcu-misc.2024.02.14a' into rcu.2024.02.26a
5 parents: e15aed4 + f3c4c00 + 23da2ad + 0bb11a3 + c90e3ec

19 files changed: 394 additions, 308 deletions

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 5 additions & 0 deletions
@@ -5047,6 +5047,11 @@
 			this kernel boot parameter, forcibly setting it
 			to zero.
 
+	rcutree.enable_rcu_lazy= [KNL]
+			To save power, batch RCU callbacks and flush after
+			delay, memory pressure or callback list growing too
+			big.
+
 	rcuscale.gp_async= [KNL]
 			Measure performance of asynchronous
 			grace-period primitives such as call_rcu().
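With CONFIG_RCU_LAZY enabled, callbacks posted through call_rcu() may be batched and flushed later; callers that cannot tolerate that delay use call_rcu_hurry() instead. A minimal sketch of that choice, assuming a hypothetical struct my_node and helper names (only call_rcu() and call_rcu_hurry() are real kernel APIs here):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_node {                        /* hypothetical structure */
            struct rcu_head rh;
            int data;
    };

    static void my_node_free_cb(struct rcu_head *rh)
    {
            kfree(container_of(rh, struct my_node, rh));
    }

    static void my_node_retire(struct my_node *np, bool urgent)
    {
            if (urgent)
                    call_rcu_hurry(&np->rh, my_node_free_cb);  /* bypasses lazy batching */
            else
                    call_rcu(&np->rh, my_node_free_cb);        /* may be batched when lazy */
    }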

include/linux/rcu_sync.h

Lines changed: 0 additions & 1 deletion
@@ -37,7 +37,6 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 }
 
 extern void rcu_sync_init(struct rcu_sync *);
-extern void rcu_sync_enter_start(struct rcu_sync *);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
 extern void rcu_sync_dtor(struct rcu_sync *);
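The dropped declaration leaves the four-function rcu_sync interface (rcu_sync_init(), rcu_sync_enter(), rcu_sync_exit(), rcu_sync_dtor()) plus the rcu_sync_is_idle() fast-path test above this hunk. A minimal usage sketch, assuming a hypothetical my_gate wrapper; the reader/writer split mirrors the usual fast-path/slow-path pattern, but every name except the rcu_sync calls is illustrative:

    #include <linux/rcu_sync.h>
    #include <linux/rcupdate.h>

    struct my_gate {                        /* hypothetical wrapper */
            struct rcu_sync rss;
    };

    static void my_gate_init(struct my_gate *g)
    {
            rcu_sync_init(&g->rss);
    }

    static void my_gate_read(struct my_gate *g)
    {
            rcu_read_lock();
            if (rcu_sync_is_idle(&g->rss)) {
                    /* No writer active: lockless fast path. */
            } else {
                    /* Writer active: fall back to the slow path. */
            }
            rcu_read_unlock();
    }

    static void my_gate_write_begin(struct my_gate *g)
    {
            rcu_sync_enter(&g->rss);        /* may block waiting for a grace period */
    }

    static void my_gate_write_end(struct my_gate *g)
    {
            rcu_sync_exit(&g->rss);         /* readers may use the fast path again */
    }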

include/linux/rcupdate.h

Lines changed: 2 additions & 2 deletions
@@ -184,9 +184,9 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
 	do {								\
 		int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
 									\
-		if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \
+		if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
 		    likely(!___rttq_nesting)) {				\
-			rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \
+			rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
 		} else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
 			   !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
 			rcu_tasks_trace_qs_blkd(t);			\
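This hunk tightens the quiescent-state bookkeeping on the RCU Tasks Trace fast path; the reader and updater APIs themselves are unchanged. As a reminder of that pairing, a minimal sketch assuming a hypothetical active_prog pointer (rcu_read_lock_trace(), rcu_read_unlock_trace(), rcu_read_lock_trace_held(), and synchronize_rcu_tasks_trace() are the real APIs):

    #include <linux/rcupdate.h>
    #include <linux/rcupdate_trace.h>

    struct my_prog {                        /* hypothetical, BPF-program-like */
            void (*run)(void);
    };

    static struct my_prog __rcu *active_prog;

    static void my_prog_run(void)
    {
            struct my_prog *p;

            rcu_read_lock_trace();          /* trace readers may be preempted */
            p = rcu_dereference_check(active_prog, rcu_read_lock_trace_held());
            if (p)
                    p->run();
            rcu_read_unlock_trace();
    }

    static struct my_prog *my_prog_replace(struct my_prog *newp)
    {
            struct my_prog *oldp;

            oldp = rcu_replace_pointer(active_prog, newp, true);
            synchronize_rcu_tasks_trace();  /* wait for all pre-existing trace readers */
            return oldp;                    /* now safe to free */
    }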

include/linux/sched.h

Lines changed: 2 additions & 0 deletions
@@ -858,6 +858,8 @@ struct task_struct {
 	u8 rcu_tasks_idx;
 	int rcu_tasks_idle_cpu;
 	struct list_head rcu_tasks_holdout_list;
+	int rcu_tasks_exit_cpu;
+	struct list_head rcu_tasks_exit_list;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
 #ifdef CONFIG_TASKS_TRACE_RCU

init/init_task.c

Lines changed: 1 addition & 0 deletions
@@ -147,6 +147,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
 	.rcu_tasks_holdout = false,
 	.rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
 	.rcu_tasks_idle_cpu = -1,
+	.rcu_tasks_exit_list = LIST_HEAD_INIT(init_task.rcu_tasks_exit_list),
 #endif
 #ifdef CONFIG_TASKS_TRACE_RCU
 	.trc_reader_nesting = 0,

kernel/fork.c

Lines changed: 1 addition & 0 deletions
@@ -1976,6 +1976,7 @@ static inline void rcu_copy_process(struct task_struct *p)
 	p->rcu_tasks_holdout = false;
 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
 	p->rcu_tasks_idle_cpu = -1;
+	INIT_LIST_HEAD(&p->rcu_tasks_exit_list);
 #endif /* #ifdef CONFIG_TASKS_RCU */
 #ifdef CONFIG_TASKS_TRACE_RCU
 	p->trc_reader_nesting = 0;
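Taken together, the sched.h, init_task.c, and fork.c hunks give every task an rcu_tasks_exit_list that starts out empty, so list_empty() distinguishes "not enqueued" from "on a per-CPU exit list" maintained by the Tasks-RCU code in kernel/rcu/tasks.h. A minimal sketch of that list discipline using the standard list helpers; the demo_* functions and list head are illustrative, not the tasks.h code:

    #include <linux/list.h>
    #include <linux/sched.h>

    static LIST_HEAD(demo_exit_list);       /* hypothetical per-CPU list head */

    static void demo_task_init(struct task_struct *t)
    {
            INIT_LIST_HEAD(&t->rcu_tasks_exit_list);   /* empty: not on any list */
    }

    static void demo_task_enqueue(struct task_struct *t)
    {
            if (list_empty(&t->rcu_tasks_exit_list))
                    list_add(&t->rcu_tasks_exit_list, &demo_exit_list);
    }

    static void demo_task_dequeue(struct task_struct *t)
    {
            if (!list_empty(&t->rcu_tasks_exit_list))
                    list_del_init(&t->rcu_tasks_exit_list);  /* restore the empty state */
    }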

kernel/rcu/Kconfig

Lines changed: 13 additions & 0 deletions
@@ -314,6 +314,19 @@ config RCU_LAZY
 	  To save power, batch RCU callbacks and flush after delay, memory
 	  pressure, or callback list growing too big.
 
+	  Requires rcu_nocbs=all to be set.
+
+	  Use rcutree.enable_rcu_lazy=0 to turn it off at boot time.
+
+config RCU_LAZY_DEFAULT_OFF
+	bool "Turn RCU lazy invocation off by default"
+	depends on RCU_LAZY
+	default n
+	help
+	  Allows building the kernel with CONFIG_RCU_LAZY=y yet keep it default
+	  off. Boot time param rcutree.enable_rcu_lazy=1 can be used to switch
+	  it back on.
+
 config RCU_DOUBLE_CHECK_CB_TIME
 	bool "RCU callback-batch backup time check"
 	depends on RCU_EXPERT
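One plausible way to wire the new Kconfig knob to the rcutree.enable_rcu_lazy= boot parameter is a module parameter in kernel/rcu/tree.c whose default flips with CONFIG_RCU_LAZY_DEFAULT_OFF; the sketch below is an assumption about that wiring, not a quote of the tree.c source (the rcutree. prefix comes from the defining file's module name):

    #include <linux/kernel.h>
    #include <linux/moduleparam.h>

    #ifdef CONFIG_RCU_LAZY
    /* Default on, unless CONFIG_RCU_LAZY_DEFAULT_OFF=y; overridable at boot. */
    static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF);
    module_param(enable_rcu_lazy, bool, 0444);   /* rcutree.enable_rcu_lazy= */
    #else
    #define enable_rcu_lazy	false
    #endif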

kernel/rcu/rcu.h

Lines changed: 10 additions & 9 deletions
@@ -528,6 +528,12 @@ struct task_struct *get_rcu_tasks_gp_kthread(void);
 struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
 #endif // # ifdef CONFIG_TASKS_RUDE_RCU
 
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void tasks_cblist_init_generic(void);
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+static inline void tasks_cblist_init_generic(void) { }
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
+
 #define RCU_SCHEDULER_INACTIVE	0
 #define RCU_SCHEDULER_INIT	1
 #define RCU_SCHEDULER_RUNNING	2
@@ -543,11 +549,11 @@ enum rcutorture_type {
 };
 
 #if defined(CONFIG_RCU_LAZY)
-unsigned long rcu_lazy_get_jiffies_till_flush(void);
-void rcu_lazy_set_jiffies_till_flush(unsigned long j);
+unsigned long rcu_get_jiffies_lazy_flush(void);
+void rcu_set_jiffies_lazy_flush(unsigned long j);
 #else
-static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; }
-static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { }
+static inline unsigned long rcu_get_jiffies_lazy_flush(void) { return 0; }
+static inline void rcu_set_jiffies_lazy_flush(unsigned long j) { }
 #endif
 
 #if defined(CONFIG_TREE_RCU)
@@ -623,12 +629,7 @@ int rcu_get_gp_kthreads_prio(void);
 void rcu_fwd_progress_check(unsigned long j);
 void rcu_force_quiescent_state(void);
 extern struct workqueue_struct *rcu_gp_wq;
-#ifdef CONFIG_RCU_EXP_KTHREAD
 extern struct kthread_worker *rcu_exp_gp_kworker;
-extern struct kthread_worker *rcu_exp_par_gp_kworker;
-#else /* !CONFIG_RCU_EXP_KTHREAD */
-extern struct workqueue_struct *rcu_par_gp_wq;
-#endif /* CONFIG_RCU_EXP_KTHREAD */
 void rcu_gp_slow_register(atomic_t *rgssp);
 void rcu_gp_slow_unregister(atomic_t *rgssp);
 #endif /* #else #ifdef CONFIG_TINY_RCU */
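The last hunk drops the CONFIG_RCU_EXP_KTHREAD split, leaving a single rcu_exp_gp_kworker based on the kthread_worker API. For orientation, the general kthread_worker pattern looks roughly like this; the worker and work-item names are illustrative, not the RCU code:

    #include <linux/kthread.h>
    #include <linux/err.h>

    static struct kthread_worker *demo_worker;
    static struct kthread_work demo_work;

    static void demo_work_fn(struct kthread_work *work)
    {
            /* Runs in demo_worker's dedicated kthread context. */
    }

    static int demo_setup(void)
    {
            demo_worker = kthread_create_worker(0, "demo_worker");
            if (IS_ERR(demo_worker))
                    return PTR_ERR(demo_worker);
            kthread_init_work(&demo_work, demo_work_fn);
            kthread_queue_work(demo_worker, &demo_work);
            return 0;
    }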

kernel/rcu/rcuscale.c

Lines changed: 3 additions & 3 deletions
@@ -764,9 +764,9 @@ kfree_scale_init(void)
 
 	if (kfree_by_call_rcu) {
 		/* do a test to check the timeout. */
-		orig_jif = rcu_lazy_get_jiffies_till_flush();
+		orig_jif = rcu_get_jiffies_lazy_flush();
 
-		rcu_lazy_set_jiffies_till_flush(2 * HZ);
+		rcu_set_jiffies_lazy_flush(2 * HZ);
 		rcu_barrier();
 
 		jif_start = jiffies;
@@ -775,7 +775,7 @@ kfree_scale_init(void)
 
 		smp_cond_load_relaxed(&rcu_lazy_test1_cb_called, VAL == 1);
 
-		rcu_lazy_set_jiffies_till_flush(orig_jif);
+		rcu_set_jiffies_lazy_flush(orig_jif);
 
 		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
 			pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");

kernel/rcu/rcutorture.c

Lines changed: 9 additions & 4 deletions
@@ -1368,9 +1368,13 @@ rcu_torture_writer(void *arg)
 	struct rcu_torture *rp;
 	struct rcu_torture *old_rp;
 	static DEFINE_TORTURE_RANDOM(rand);
+	unsigned long stallsdone = jiffies;
 	bool stutter_waited;
 	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];
 
+	// If a new stall test is added, this must be adjusted.
+	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
+		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ;
 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
 	if (!can_expedite)
 		pr_alert("%s" TORTURE_FLAG
@@ -1576,11 +1580,11 @@ rcu_torture_writer(void *arg)
 		    !atomic_read(&rcu_fwd_cb_nodelay) &&
 		    !cur_ops->slow_gps &&
 		    !torture_must_stop() &&
-		    boot_ended)
+		    boot_ended &&
+		    time_after(jiffies, stallsdone))
 			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
 				if (list_empty(&rcu_tortures[i].rtort_free) &&
-				    rcu_access_pointer(rcu_torture_current) !=
-				    &rcu_tortures[i]) {
+				    rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
 					tracing_off();
 					show_rcu_gp_kthreads();
 					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
@@ -2441,7 +2445,8 @@ static struct notifier_block rcu_torture_stall_block = {
 
 /*
  * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
- * induces a CPU stall for the time specified by stall_cpu.
+ * induces a CPU stall for the time specified by stall_cpu. If a new
+ * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
 */
 static int rcu_torture_stall(void *args)
 {
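The stallsdone deadline added above is compared with time_after() rather than a plain greater-than, so the check stays correct even if the jiffies counter wraps between arming and testing. A tiny illustrative sketch of the same idiom (names are hypothetical):

    #include <linux/jiffies.h>

    static unsigned long quiet_until;       /* hypothetical deadline */

    static void demo_arm(unsigned long seconds)
    {
            quiet_until = jiffies + seconds * HZ;
    }

    static bool demo_may_check(void)
    {
            /* Correct across jiffies wraparound, unlike "jiffies > quiet_until". */
            return time_after(jiffies, quiet_until);
    }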
