@@ -3987,13 +3987,16 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 }
 
 /*
- * Called with preemption disabled, and from cross-cpu IRQ context.
+ * If needed, entrain an rcu_barrier() callback on rdp->cblist.
  */
-static void rcu_barrier_func(void *cpu_in)
+static void rcu_barrier_entrain(struct rcu_data *rdp)
 {
-	uintptr_t cpu = (uintptr_t)cpu_in;
-	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
+	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
 
+	lockdep_assert_held(&rdp->barrier_lock);
+	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
+		return;
 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
 	rdp->barrier_head.func = rcu_barrier_callback;
 	debug_rcu_head_queue(&rdp->barrier_head);
@@ -4003,10 +4006,26 @@ static void rcu_barrier_func(void *cpu_in)
 		atomic_inc(&rcu_state.barrier_cpu_count);
 	} else {
 		debug_rcu_head_unqueue(&rdp->barrier_head);
-		rcu_barrier_trace(TPS("IRQNQ"), -1,
-				  rcu_state.barrier_sequence);
+		rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
 	}
 	rcu_nocb_unlock(rdp);
+	smp_store_release(&rdp->barrier_seq_snap, gseq);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_handler(void *cpu_in)
+{
+	uintptr_t cpu = (uintptr_t)cpu_in;
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+	lockdep_assert_irqs_disabled();
+	WARN_ON_ONCE(cpu != rdp->cpu);
+	WARN_ON_ONCE(cpu != smp_processor_id());
+	raw_spin_lock(&rdp->barrier_lock);
+	rcu_barrier_entrain(rdp);
+	raw_spin_unlock(&rdp->barrier_lock);
 }
 
 /**
@@ -4020,6 +4039,8 @@ static void rcu_barrier_func(void *cpu_in)
 void rcu_barrier(void)
 {
 	uintptr_t cpu;
+	unsigned long flags;
+	unsigned long gseq;
 	struct rcu_data *rdp;
 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
 
@@ -4038,6 +4059,7 @@ void rcu_barrier(void)
 
 	/* Mark the start of the barrier operation. */
 	rcu_seq_start(&rcu_state.barrier_sequence);
+	gseq = rcu_state.barrier_sequence;
 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
 
 	/*
@@ -4058,19 +4080,30 @@ void rcu_barrier(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		rdp = per_cpu_ptr(&rcu_data, cpu);
+retry:
+		if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
+			continue;
+		raw_spin_lock_irqsave(&rdp->barrier_lock, flags);
 		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
+			WRITE_ONCE(rdp->barrier_seq_snap, gseq);
+			raw_spin_unlock_irqrestore(&rdp->barrier_lock, flags);
 			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
 			continue;
 		}
-		if (cpu_online(cpu)) {
-			rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
-			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
-		} else {
+		if (!rcu_rdp_cpu_online(rdp)) {
+			rcu_barrier_entrain(rdp);
+			WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
+			raw_spin_unlock_irqrestore(&rdp->barrier_lock, flags);
 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
-			local_irq_disable();
-			rcu_barrier_func((void *)cpu);
-			local_irq_enable();
+			continue;
 		}
+		raw_spin_unlock_irqrestore(&rdp->barrier_lock, flags);
+		if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
+			schedule_timeout_uninterruptible(1);
+			goto retry;
+		}
+		WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
+		rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
 	}
 	cpus_read_unlock();
 
@@ -4087,6 +4120,12 @@ void rcu_barrier(void)
 	/* Mark the end of the barrier operation. */
 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
 	rcu_seq_end(&rcu_state.barrier_sequence);
+	gseq = rcu_state.barrier_sequence;
+	for_each_possible_cpu(cpu) {
+		rdp = per_cpu_ptr(&rcu_data, cpu);
+
+		WRITE_ONCE(rdp->barrier_seq_snap, gseq);
+	}
 
 	/* Other rcu_barrier() invocations can now safely proceed. */
 	mutex_unlock(&rcu_state.barrier_mutex);
@@ -4134,6 +4173,8 @@ rcu_boot_init_percpu_data(int cpu)
 	INIT_WORK(&rdp->strict_work, strict_work_handler);
 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
+	raw_spin_lock_init(&rdp->barrier_lock);
+	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
@@ -4284,8 +4325,10 @@ void rcu_cpu_starting(unsigned int cpu)
 	local_irq_save(flags);
 	arch_spin_lock(&rcu_state.ofl_lock);
 	rcu_dynticks_eqs_online();
+	raw_spin_lock(&rdp->barrier_lock);
 	raw_spin_lock_rcu_node(rnp);
 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
+	raw_spin_unlock(&rdp->barrier_lock);
 	newcpu = !(rnp->expmaskinitnext & mask);
 	rnp->expmaskinitnext |= mask;
 	/* Allow lockless access for expedited grace periods. */
@@ -4372,7 +4415,9 @@ void rcutree_migrate_callbacks(int cpu)
 	    rcu_segcblist_empty(&rdp->cblist))
 		return;  /* No callbacks to migrate. */
 
-	local_irq_save(flags);
+	raw_spin_lock_irqsave(&rdp->barrier_lock, flags);
+	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
+	rcu_barrier_entrain(rdp);
 	my_rdp = this_cpu_ptr(&rcu_data);
 	my_rnp = my_rdp->mynode;
 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
@@ -4382,10 +4427,10 @@ void rcutree_migrate_callbacks(int cpu)
 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
 		   rcu_advance_cbs(my_rnp, my_rdp);
 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
+	raw_spin_unlock(&rdp->barrier_lock); /* irqs remain disabled. */
 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
 	rcu_segcblist_disable(&rdp->cblist);
-	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
-		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
+	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
 	if (rcu_rdp_is_offloaded(my_rdp)) {
 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
 		__call_rcu_nocb_wake(my_rdp, true, flags);
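
The early return in rcu_barrier_entrain() leans on the rcu_seq encoding of rcu_state.barrier_sequence: the low bits hold an in-progress state while the upper bits count completed operations, and rdp->barrier_seq_snap records the last value this CPU was reconciled against. Below is a minimal userspace sketch of that skip test, assuming a 2-bit state field as in the kernel's rcu_seq helpers; seq_ctr(), seq_state(), seq_start(), seq_end() and needs_entrain() are simplified stand-ins for illustration, not kernel APIs.

#include <stdio.h>

#define SEQ_CTR_SHIFT	2	/* low bits hold the "in progress" state */

static unsigned long seq_ctr(unsigned long s)
{
	return s >> SEQ_CTR_SHIFT;			/* completed operations */
}

static unsigned long seq_state(unsigned long s)
{
	return s & ((1UL << SEQ_CTR_SHIFT) - 1);	/* nonzero while in flight */
}

static void seq_start(unsigned long *sp)
{
	(*sp)++;					/* mark operation in progress */
}

static void seq_end(unsigned long *sp)
{
	*sp = (seq_ctr(*sp) + 1) << SEQ_CTR_SHIFT;	/* mark operation complete */
}

/*
 * Mirror of the early-return test in rcu_barrier_entrain(): entrain only
 * when a barrier is in flight (gseq mid-operation), this CPU has not yet
 * been handled for it (lseq idle), and both refer to the same epoch.
 */
static int needs_entrain(unsigned long gseq, unsigned long lseq)
{
	return !(seq_state(lseq) || !seq_state(gseq) ||
		 seq_ctr(lseq) != seq_ctr(gseq));
}

int main(void)
{
	unsigned long gseq = 0, snap = 0;

	seq_start(&gseq);				/* rcu_barrier() marks the start */
	printf("%d\n", needs_entrain(gseq, snap));	/* 1: CPU still needs a callback */
	snap = gseq;					/* callback entrained, snapshot taken */
	printf("%d\n", needs_entrain(gseq, snap));	/* 0: a repeat IPI becomes a no-op */
	seq_end(&gseq);
	snap = gseq;					/* end of barrier resyncs every CPU */
	printf("%d\n", needs_entrain(gseq, snap));	/* 0: no barrier in flight */
	return 0;
}

Because the snapshot makes entraining idempotent within one barrier epoch, rcu_barrier() can safely retry smp_call_function_single() against a CPU racing with hotplug, and rcutree_migrate_callbacks() can entrain on behalf of an offline CPU, without any callback being counted twice.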