
Commit 0e1ff67

Maksim Davydov authored and Ingo Molnar committed
x86/split_lock: Simplify reenabling
When split_lock_mitigate is disabled, each CPU needs its own delayed_work structure, which is used to re-enable split lock detection after it has been disabled. A delayed_work structure, however, must be properly initialized after it is allocated.

The current implementation uses deferred initialization, which makes the split lock handler code unclear. The code can be simplified a bit by moving the initialization to an appropriate initcall.

sld_setup() is called before setup_per_cpu_areas(), so it cannot be used for this purpose; introduce an independent initcall for the initialization instead.

[ mingo: Simplified the 'work' assignment line a bit more. ]

Signed-off-by: Maksim Davydov <davydov-max@yandex-team.ru>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20250325085807.171885-1-davydov-max@yandex-team.ru
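In essence, the patch moves per-CPU delayed_work initialization into an early initcall. A minimal sketch of that pattern, distilled from the diff below (the identifiers match the patch, but this is an illustrative condensation, not the complete file):

/* One delayed_work per CPU; re-arms split lock detection after a delay. */
static DEFINE_PER_CPU(struct delayed_work, sl_reenable);

/*
 * Runs at initcall time, i.e. after setup_per_cpu_areas(), so the per-CPU
 * addresses are valid and every entry can be initialized once, up front,
 * instead of lazily in the #AC handler path.
 */
static int __init setup_split_lock_delayed_work(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		INIT_DELAYED_WORK(per_cpu_ptr(&sl_reenable, cpu),
				  __split_lock_reenable);

	return 0;
}
pure_initcall(setup_split_lock_delayed_work);

pure_initcall() registers the function at the earliest initcall level, which still runs well after setup_per_cpu_areas(), so the per-CPU addresses are already valid when the entries are initialized.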
1 parent 2df0c02 commit 0e1ff67

1 file changed: +24 −11 lines

arch/x86/kernel/cpu/bus_lock.c

Lines changed: 24 additions & 11 deletions
@@ -200,6 +200,26 @@ static void __split_lock_reenable(struct work_struct *work)
  */
 static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
 
+/*
+ * Per-CPU delayed_work can't be statically initialized properly because
+ * the struct address is unknown. Thus per-CPU delayed_work structures
+ * have to be initialized during kernel initialization and after calling
+ * setup_per_cpu_areas().
+ */
+static int __init setup_split_lock_delayed_work(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
+
+		INIT_DELAYED_WORK(work, __split_lock_reenable);
+	}
+
+	return 0;
+}
+pure_initcall(setup_split_lock_delayed_work);
+
 /*
  * If a CPU goes offline with pending delayed work to re-enable split lock
  * detection then the delayed work will be executed on some other CPU. That
@@ -219,15 +239,16 @@ static int splitlock_cpu_offline(unsigned int cpu)
 
 static void split_lock_warn(unsigned long ip)
 {
-	struct delayed_work *work = NULL;
+	struct delayed_work *work;
 	int cpu;
+	unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate);
 
 	if (!current->reported_split_lock)
 		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
 				    current->comm, current->pid, ip);
 	current->reported_split_lock = 1;
 
-	if (sysctl_sld_mitigate) {
+	if (saved_sld_mitigate) {
 		/*
 		 * misery factor #1:
 		 * sleep 10ms before trying to execute split lock.
@@ -240,18 +261,10 @@ static void split_lock_warn(unsigned long ip)
 		 */
 		if (down_interruptible(&buslock_sem) == -EINTR)
 			return;
-		work = &sl_reenable_unlock;
 	}
 
 	cpu = get_cpu();
-
-	if (!work) {
-		work = this_cpu_ptr(&sl_reenable);
-		/* Deferred initialization of per-CPU struct */
-		if (!work->work.func)
-			INIT_DELAYED_WORK(work, __split_lock_reenable);
-	}
-
+	work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu);
 	schedule_delayed_work_on(cpu, work, 2);
 
 	/* Disable split lock detection on this CPU to make progress */
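For readability, here is roughly what split_lock_warn() looks like with this patch applied, pieced together from the hunks above (lines the diff does not show are elided as comments):

static void split_lock_warn(unsigned long ip)
{
	struct delayed_work *work;
	int cpu;
	unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate);

	if (!current->reported_split_lock)
		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, ip);
	current->reported_split_lock = 1;

	if (saved_sld_mitigate) {
		/* ... "misery factor" sleep, elided ... */
		if (down_interruptible(&buslock_sem) == -EINTR)
			return;
	}

	cpu = get_cpu();
	work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu);
	schedule_delayed_work_on(cpu, work, 2);

	/* Disable split lock detection on this CPU to make progress */
	/* ... remainder of the function, elided ... */
}

Note that sysctl_sld_mitigate is read once with READ_ONCE() and the cached value drives both the semaphore path and the work-item selection, presumably so that a concurrent change of the sysctl cannot make the two decisions disagree.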
