
Commit c99303a

crwood-rh authored and KAGA-KOKO committed
genirq: Wake interrupt threads immediately when changing affinity
The affinity setting of interrupt threads happens in the context of the thread when the thread is woken up by a hard interrupt. As this can be an arbitrary time after changing the affinity, the thread can become runnable on an isolated CPU and cause isolation disruption.

Avoid this by checking the set affinity request in wait_for_interrupt() and waking the threads immediately when the affinity is modified.

Note that this is of the most benefit on systems where the interrupt affinity itself does not need to be deferred to the interrupt handler, but even where that's not the case, the total disruption will be less.

Signed-off-by: Crystal Wood <crwood@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240122235353.15235-1-crwood@redhat.com
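For illustration only, below is a minimal userspace sketch of the pattern this change relies on: the affinity update sets a flag and wakes the thread, and the thread re-checks the flag before it goes back to sleep, so the request is handled immediately instead of at the next hard interrupt. The pthread-based waiting and all names here are assumptions for the sketch, not kernel code.

/*
 * Sketch only: userspace analogue of "set flag, wake thread, re-check
 * before sleeping". Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool affinity_request;   /* stand-in for IRQTF_AFFINITY */
static bool stop;

static void *thread_fn(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!stop) {
                if (affinity_request) {
                        affinity_request = false;
                        /* the kernel thread would call set_cpus_allowed_ptr() here */
                        printf("affinity request handled\n");
                }
                /* sleep until woken by an "interrupt" or an affinity change */
                pthread_cond_wait(&wake, &lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void change_affinity(void)
{
        pthread_mutex_lock(&lock);
        affinity_request = true;        /* like set_bit(IRQTF_AFFINITY, ...) */
        pthread_cond_signal(&wake);     /* like the added wake_up_process() */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, thread_fn, NULL);
        change_affinity();      /* handled promptly, no interrupt needed */
        sleep(1);               /* demo only: give the thread time to react */

        pthread_mutex_lock(&lock);
        stop = true;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
}

Because the flag is checked before the thread waits, a wakeup issued after setting the flag cannot be lost; in the patch the equivalent ordering is set_current_state(TASK_INTERRUPTIBLE) followed by the flag check in irq_wait_for_interrupt().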
1 parent ee4c159 commit c99303a

File tree: 1 file changed (+55, -54 lines)


kernel/irq/manage.c

Lines changed: 55 additions & 54 deletions
@@ -192,10 +192,14 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 	struct irqaction *action;
 
 	for_each_action_of_desc(desc, action) {
-		if (action->thread)
+		if (action->thread) {
 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		if (action->secondary && action->secondary->thread)
+			wake_up_process(action->thread);
+		}
+		if (action->secondary && action->secondary->thread) {
 			set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
+			wake_up_process(action->secondary->thread);
+		}
 	}
 }
 

@@ -1049,10 +1053,57 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static int irq_wait_for_interrupt(struct irqaction *action)
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+	bool valid = false;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	__set_current_state(TASK_RUNNING);
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	raw_spin_lock_irq(&desc->lock);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (cpumask_available(desc->irq_common_data.affinity)) {
+		const struct cpumask *m;
+
+		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
+		cpumask_copy(mask, m);
+		valid = true;
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
+static int irq_wait_for_interrupt(struct irq_desc *desc,
+				  struct irqaction *action)
 {
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
+		irq_thread_check_affinity(desc, action);
 
 		if (kthread_should_stop()) {
 			/* may need to run one last time */

@@ -1129,52 +1180,6 @@ static void irq_finalize_oneshot(struct irq_desc *desc,
 	chip_bus_sync_unlock(desc);
 }
 
-#ifdef CONFIG_SMP
-/*
- * Check whether we need to change the affinity of the interrupt thread.
- */
-static void
-irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
-{
-	cpumask_var_t mask;
-	bool valid = true;
-
-	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
-		return;
-
-	/*
-	 * In case we are out of memory we set IRQTF_AFFINITY again and
-	 * try again next time
-	 */
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-		set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		return;
-	}
-
-	raw_spin_lock_irq(&desc->lock);
-	/*
-	 * This code is triggered unconditionally. Check the affinity
-	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
-	 */
-	if (cpumask_available(desc->irq_common_data.affinity)) {
-		const struct cpumask *m;
-
-		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
-		cpumask_copy(mask, m);
-	} else {
-		valid = false;
-	}
-	raw_spin_unlock_irq(&desc->lock);
-
-	if (valid)
-		set_cpus_allowed_ptr(current, mask);
-	free_cpumask_var(mask);
-}
-#else
-static inline void
-irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
-#endif
-
 /*
  * Interrupts which are not explicitly requested as threaded
  * interrupts rely on the implicit bh/preempt disable of the hard irq

@@ -1312,13 +1317,9 @@ static int irq_thread(void *data)
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, TWA_NONE);
 
-	irq_thread_check_affinity(desc, action);
-
-	while (!irq_wait_for_interrupt(action)) {
+	while (!irq_wait_for_interrupt(desc, action)) {
 		irqreturn_t action_ret;
 
-		irq_thread_check_affinity(desc, action);
-
 		action_ret = handler_fn(desc, action);
 		if (action_ret == IRQ_WAKE_THREAD)
 			irq_wake_secondary(desc, action);