@@ -192,10 +192,14 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 	struct irqaction *action;
 
 	for_each_action_of_desc(desc, action) {
-		if (action->thread)
+		if (action->thread) {
 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		if (action->secondary && action->secondary->thread)
+			wake_up_process(action->thread);
+		}
+		if (action->secondary && action->secondary->thread) {
 			set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
+			wake_up_process(action->secondary->thread);
+		}
 	}
 }
 
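Illustration (not part of the patch): the hunk above uses the common set-flag-then-wake pattern. irq_set_thread_affinity() records the pending change in IRQTF_AFFINITY and then wakes the handler thread with wake_up_process(), so a thread sleeping in its wait loop picks up the new affinity immediately instead of only at the next interrupt. A rough userspace analogue of that pattern, with hypothetical names and pthreads standing in for kthreads:

/* Illustration only (userspace analogue, hypothetical names).
 * Build: cc -pthread flag_then_wake.c -o flag_then_wake */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool affinity_pending;                    /* stands in for IRQTF_AFFINITY */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

/* Analogue of irq_set_thread_affinity(): record the request, then wake the thread. */
static void request_affinity_update(void)
{
        atomic_store(&affinity_pending, true);
        pthread_mutex_lock(&lock);
        pthread_cond_signal(&wake);                     /* plays the role of wake_up_process() */
        pthread_mutex_unlock(&lock);
}

/* Analogue of the irq thread: re-check the flag every time it is woken. */
static void *irq_thread_like(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&lock);
        for (;;) {
                if (atomic_exchange(&affinity_pending, false))
                        puts("affinity update handled");        /* irq_thread_check_affinity() */
                pthread_cond_wait(&wake, &lock);                /* irq_wait_for_interrupt() */
        }
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, irq_thread_like, NULL);
        sleep(1);
        request_affinity_update();      /* thread reacts without waiting for a "real" interrupt */
        sleep(1);
        return 0;
}

The flag check before pthread_cond_wait() mirrors the re-check the irq thread performs on every loop iteration, so a wakeup that arrives before the thread sleeps is not lost.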
@@ -1049,10 +1053,57 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static int irq_wait_for_interrupt(struct irqaction *action)
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+	bool valid = false;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	__set_current_state(TASK_RUNNING);
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	raw_spin_lock_irq(&desc->lock);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (cpumask_available(desc->irq_common_data.affinity)) {
+		const struct cpumask *m;
+
+		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
+		cpumask_copy(mask, m);
+		valid = true;
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
+static int irq_wait_for_interrupt(struct irq_desc *desc,
+				  struct irqaction *action)
 {
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
+		irq_thread_check_affinity(desc, action);
 
 		if (kthread_should_stop()) {
 			/* may need to run one last time */
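Note on the relocated helper (not part of the patch): it now resets the task to TASK_RUNNING first because it runs right after set_current_state(TASK_INTERRUPTIBLE) in the wait loop and the GFP_KERNEL allocation below may block; and valid now starts as false and is set only in the success path, which drops the old else branch without changing behavior. The affinity change itself is applied to the thread via set_cpus_allowed_ptr() on the copied effective mask. For orientation only, a userspace counterpart of that last step (Linux-specific, hypothetical file name):

/* Orientation only: what "move this thread onto the given CPU mask" looks like
 * from userspace. Build: cc cpu_pin_demo.c -o cpu_pin_demo */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);                      /* restrict the calling thread to CPU 0 */
        if (sched_setaffinity(0, sizeof(mask), &mask))
                perror("sched_setaffinity");

        printf("now running on CPU %d\n", sched_getcpu());
        return 0;
}

Here the thread pins itself, just as the irq thread migrates itself onto the new mask rather than being moved from outside.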
@@ -1129,52 +1180,6 @@ static void irq_finalize_oneshot(struct irq_desc *desc,
 	chip_bus_sync_unlock(desc);
 }
 
-#ifdef CONFIG_SMP
-/*
- * Check whether we need to change the affinity of the interrupt thread.
- */
-static void
-irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
-{
-	cpumask_var_t mask;
-	bool valid = true;
-
-	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
-		return;
-
-	/*
-	 * In case we are out of memory we set IRQTF_AFFINITY again and
-	 * try again next time
-	 */
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-		set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		return;
-	}
-
-	raw_spin_lock_irq(&desc->lock);
-	/*
-	 * This code is triggered unconditionally. Check the affinity
-	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
-	 */
-	if (cpumask_available(desc->irq_common_data.affinity)) {
-		const struct cpumask *m;
-
-		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
-		cpumask_copy(mask, m);
-	} else {
-		valid = false;
-	}
-	raw_spin_unlock_irq(&desc->lock);
-
-	if (valid)
-		set_cpus_allowed_ptr(current, mask);
-	free_cpumask_var(mask);
-}
-#else
-static inline void
-irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
-#endif
-
 /*
  * Interrupts which are not explicitly requested as threaded
  * interrupts rely on the implicit bh/preempt disable of the hard irq
@@ -1312,13 +1317,9 @@ static int irq_thread(void *data)
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, TWA_NONE);
 
-	irq_thread_check_affinity(desc, action);
-
-	while (!irq_wait_for_interrupt(action)) {
+	while (!irq_wait_for_interrupt(desc, action)) {
 		irqreturn_t action_ret;
 
-		irq_thread_check_affinity(desc, action);
-
 		action_ret = handler_fn(desc, action);
 		if (action_ret == IRQ_WAKE_THREAD)
 			irq_wake_secondary(desc, action);