Commit ec9d635

paulmckrcu authored and urezki committed
rcutorture: Make rcutorture_one_extend() check reader state
This commit adds reader-state debugging checks to a new function named
rcutorture_one_extend_check(), which is invoked before and after setting
new reader states by the existing rcutorture_one_extend() function. These
checks have proven to be rather heavyweight, reducing the reproduction
rate of some failures by a factor of two. They are therefore hidden behind
a new RCU_TORTURE_TEST_CHK_RDR_STATE Kconfig option.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Frederic Weisbecker <frederic@kernel.org>
Tested-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
1 parent 16338e7 commit ec9d635

2 files changed: +79 −8 lines changed

kernel/rcu/Kconfig.debug

Lines changed: 16 additions & 0 deletions

@@ -53,6 +53,22 @@ config RCU_TORTURE_TEST
 	  Say M if you want the RCU torture tests to build as a module.
 	  Say N if you are unsure.
 
+config RCU_TORTURE_TEST_CHK_RDR_STATE
+	tristate "Check rcutorture reader state"
+	depends on RCU_TORTURE_TEST
+	default n
+	help
+	  This option causes rcutorture to check the desired rcutorture
+	  reader state for each segment against the actual context.
+	  Note that PREEMPT_COUNT must be enabled if the preempt-disabled
+	  and bh-disabled checks are to take effect, and that PREEMPT_RCU
+	  must be enabled for the RCU-nesting checks to take effect.
+	  These checks add overhead, and this Kconfig option is therefore
+	  disabled by default.
+
+	  Say Y here if you want rcutorture reader contexts checked.
+	  Say N if you are unsure.
+
 config RCU_TORTURE_TEST_LOG_CPU
 	tristate "Log CPU for rcutorture failures"
 	depends on RCU_TORTURE_TEST
kernel/rcu/rcutorture.c

Lines changed: 63 additions & 8 deletions
@@ -359,7 +359,8 @@ struct rcu_torture_ops {
 	void (*read_delay)(struct torture_random_state *rrsp,
 			   struct rt_read_seg *rtrsp);
 	void (*readunlock)(int idx);
-	int (*readlock_held)(void);
+	int (*readlock_held)(void);	// lockdep.
+	int (*readlock_nesting)(void);	// actual nesting, if available, -1 if not.
 	unsigned long (*get_gp_seq)(void);
 	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
 	void (*deferred_free)(struct rcu_torture *p);
@@ -466,6 +467,15 @@ static void rcu_torture_read_unlock(int idx)
 	rcu_read_unlock();
 }
 
+static int rcu_torture_readlock_nesting(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+		return rcu_preempt_depth();
+	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+		return (preempt_count() & PREEMPT_MASK);
+	return -1;
+}
+
 /*
  * Update callback in the pipe. This should be invoked after a grace period.
  */
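The new helper leans on IS_ENABLED() collapsing to a compile-time
constant, so branches for disabled configs are discarded at build time
and the function degrades to returning -1 when no nesting count is
available. A minimal userspace sketch of the same fallback pattern; the
CFG_* macros and the fixed return values are stand-ins, not kernel code:

	#include <stdio.h>

	/* Stand-ins for Kconfig results: 1 when enabled, 0 when disabled. */
	#define CFG_PREEMPT_RCU   0
	#define CFG_PREEMPT_COUNT 1
	#define IS_ENABLED(cfg)   (cfg)

	/* Same shape as rcu_torture_readlock_nesting(): return the most
	 * precise nesting count available, or -1 when there is none. */
	static int readlock_nesting(void)
	{
		if (IS_ENABLED(CFG_PREEMPT_RCU))
			return 1;	/* rcu_preempt_depth() in the kernel */
		if (IS_ENABLED(CFG_PREEMPT_COUNT))
			return 2;	/* preempt_count() & PREEMPT_MASK */
		return -1;		/* no way to tell */
	}

	int main(void)
	{
		printf("nesting: %d\n", readlock_nesting());	/* prints 2 */
		return 0;
	}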
@@ -555,6 +565,7 @@ static struct rcu_torture_ops rcu_ops = {
 	.read_delay = rcu_read_delay,
 	.readunlock = rcu_torture_read_unlock,
 	.readlock_held = torture_readlock_not_held,
+	.readlock_nesting = rcu_torture_readlock_nesting,
 	.get_gp_seq = rcu_get_gp_seq,
 	.gp_diff = rcu_seq_diff,
 	.deferred_free = rcu_torture_deferred_free,
@@ -1847,6 +1858,44 @@ static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
 	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
 }
 
+// Verify the specified RCUTORTURE_RDR* state.
+#define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x\n", __func__, s, curstate, new, old
+static void rcutorture_one_extend_check(char *s, int curstate, int new, int old, bool insoftirq)
+{
+	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE))
+		return;
+
+	WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled(), ROEC_ARGS);
+	WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS);
+
+	// If CONFIG_PREEMPT_COUNT=n, further checks are unreliable.
+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+		return;
+
+	WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
+		  !(preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
+	WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
+		  !(preempt_count() & PREEMPT_MASK), ROEC_ARGS);
+	WARN_ONCE(cur_ops->readlock_nesting &&
+		  (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) &&
+		  cur_ops->readlock_nesting() == 0, ROEC_ARGS);
+
+	// Timer handlers have all sorts of stuff disabled, so ignore
+	// unintended disabling.
+	if (insoftirq)
+		return;
+
+	WARN_ONCE(cur_ops->extendables &&
+		  !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
+		  (preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
+	WARN_ONCE(cur_ops->extendables &&
+		  !(curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
+		  (preempt_count() & PREEMPT_MASK), ROEC_ARGS);
+	WARN_ONCE(cur_ops->readlock_nesting &&
+		  !(curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) &&
+		  cur_ops->readlock_nesting() > 0, ROEC_ARGS);
+}
+
 /*
  * Do one extension of an RCU read-side critical section using the
  * current reader state in readstate (set to zero for initial entry
@@ -1856,7 +1905,7 @@ static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
  * beginning or end of the critical section and if there was actually a
  * change, do a ->read_delay().
  */
-static void rcutorture_one_extend(int *readstate, int newstate,
+static void rcutorture_one_extend(int *readstate, int newstate, bool insoftirq,
 				  struct torture_random_state *trsp,
 				  struct rt_read_seg *rtrsp)
 {
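One detail worth calling out: ROEC_ARGS expands to a complete
format-string-plus-arguments list, which works only because WARN_ONCE()
is variadic, and it quietly assumes that s, curstate, new, and old are
in scope at every use. A userspace restatement of the trick, with
fprintf() standing in for WARN_ONCE():

	#include <stdio.h>

	/* Expands to "fmt, args...", so one macro serves every call site. */
	#define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x\n", \
		__func__, s, curstate, new, old

	static void check(const char *s, int curstate, int new, int old)
	{
		if (curstate & 0x1)		/* some failed condition */
			fprintf(stderr, ROEC_ARGS); /* WARN_ONCE(cond, ROEC_ARGS) in the kernel */
	}

	int main(void)
	{
		check("during change", 0x3, 0x2, 0x1);
		return 0;
	}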
@@ -1870,6 +1919,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
 
 	WARN_ON_ONCE(idxold2 < 0);
 	WARN_ON_ONCE(idxold2 & ~RCUTORTURE_RDR_ALLBITS);
+	rcutorture_one_extend_check("before change", idxold1, statesnew, statesold, insoftirq);
 	rtrsp->rt_readstate = newstate;
 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU))
 		rtrsp->rt_cpu = raw_smp_processor_id();
@@ -1890,6 +1940,10 @@ static void rcutorture_one_extend(int *readstate, int newstate,
 	if (statesnew & RCUTORTURE_RDR_RCU_2)
 		idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2;
 
+	// Complain unless both the old and the new protection is in place.
+	rcutorture_one_extend_check("during change",
+				    idxold1 | statesnew, statesnew, statesold, insoftirq);
+
 	/*
 	 * Next, remove old protection, in decreasing order of strength
 	 * to avoid unlock paths that aren't safe in the stronger
19401994
WARN_ON_ONCE(*readstate < 0);
19411995
if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS))
19421996
pr_info("Unexpected readstate value of %#x\n", *readstate);
1997+
rcutorture_one_extend_check("after change", *readstate, statesnew, statesold, insoftirq);
19431998
}
19441999

19452000
/* Return the biggest extendables mask given current RCU and boot parameters. */
@@ -2006,7 +2061,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
20062061
* critical section.
20072062
*/
20082063
static struct rt_read_seg *
2009-
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
2064+
rcutorture_loop_extend(int *readstate, bool insoftirq, struct torture_random_state *trsp,
20102065
struct rt_read_seg *rtrsp)
20112066
{
20122067
int i;
@@ -2021,7 +2076,7 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
20212076
i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
20222077
for (j = 0; j < i; j++) {
20232078
mask = rcutorture_extend_mask(*readstate, trsp);
2024-
rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
2079+
rcutorture_one_extend(readstate, mask, insoftirq, trsp, &rtrsp[j]);
20252080
}
20262081
return &rtrsp[j];
20272082
}
@@ -2051,7 +2106,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
20512106

20522107
WARN_ON_ONCE(!rcu_is_watching());
20532108
newstate = rcutorture_extend_mask(readstate, trsp);
2054-
rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
2109+
rcutorture_one_extend(&readstate, newstate, myid < 0, trsp, rtrsp++);
20552110
if (checkpolling) {
20562111
if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
20572112
cookie = cur_ops->get_gp_state();
@@ -2064,13 +2119,13 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
20642119
!cur_ops->readlock_held || cur_ops->readlock_held());
20652120
if (p == NULL) {
20662121
/* Wait for rcu_torture_writer to get underway */
2067-
rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2122+
rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
20682123
return false;
20692124
}
20702125
if (p->rtort_mbtest == 0)
20712126
atomic_inc(&n_rcu_torture_mberror);
20722127
rcu_torture_reader_do_mbchk(myid, p, trsp);
2073-
rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
2128+
rtrsp = rcutorture_loop_extend(&readstate, myid < 0, trsp, rtrsp);
20742129
preempt_disable();
20752130
pipe_count = READ_ONCE(p->rtort_pipe_count);
20762131
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -2112,7 +2167,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
21122167
preempted = cur_ops->reader_blocked();
21132168
if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU))
21142169
rt_last_cpu = raw_smp_processor_id();
2115-
rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2170+
rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
21162171
WARN_ON_ONCE(readstate);
21172172
// This next splat is expected behavior if leakpointer, especially
21182173
// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
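Every converted call site above passes myid < 0 as the new insoftirq
argument. This appears to rely on a pre-existing rcutorture convention:
reader kthreads invoke rcu_torture_one_read() with their nonnegative
reader index, while the timer handler invokes it with a negative ID, so
a negative ID marks (timer) softirq context. A one-line restatement of
that convention; the helper name is illustrative, not from the patch:

	#include <stdbool.h>

	/* Reader kthreads pass a nonnegative index as myid; the timer
	 * handler passes a negative ID, so that is the softirq marker. */
	static inline bool reader_in_softirq(long myid)
	{
		return myid < 0;
	}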
