Commit 54c7939

kprobes: Use guard() for external locks
Use guard() for text_mutex, cpu_read_lock, and jump_label_lock in the kprobes.

Link: https://lore.kernel.org/all/173371208663.480397.7535769878667655223.stgit@devnote2/
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
1 parent e2b6e5e commit 54c7939
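
(Not part of the commit.) For context, guard() and scoped_guard() come from the kernel's <linux/cleanup.h> scope-based cleanup helpers: a guard takes the lock when it is declared and releases it automatically when the enclosing scope is left, which is what lets the diff below drop its explicit unlock calls and 'goto out' paths. A minimal sketch of the before/after pattern, using a hypothetical mutex and function names that are not part of this commit:

#include <linux/cleanup.h>	/* guard(), scoped_guard() */
#include <linux/mutex.h>	/* the "mutex" guard class */

static DEFINE_MUTEX(example_mutex);	/* hypothetical lock, for illustration only */

/* Before: explicit unlock on every exit path, typically via a cleanup label. */
static int example_op_old(int val)
{
	int ret = 0;

	mutex_lock(&example_mutex);
	if (val < 0) {
		ret = -EINVAL;
		goto out;
	}
	/* ... work done under the lock ... */
out:
	mutex_unlock(&example_mutex);
	return ret;
}

/* After: guard() ties the mutex to this scope, so early returns need no label. */
static int example_op_new(int val)
{
	guard(mutex)(&example_mutex);

	if (val < 0)
		return -EINVAL;
	/* ... work done under the lock ... */
	return 0;
}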

kernel/kprobes.c

Lines changed: 90 additions & 119 deletions
@@ -596,41 +596,38 @@ static void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static void kprobe_optimizer(struct work_struct *work)
 {
-	mutex_lock(&kprobe_mutex);
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(mutex)(&kprobe_mutex);
 
-	/*
-	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
-	 * kprobes before waiting for quiesence period.
-	 */
-	do_unoptimize_kprobes();
+	scoped_guard(cpus_read_lock) {
+		guard(mutex)(&text_mutex);
 
-	/*
-	 * Step 2: Wait for quiesence period to ensure all potentially
-	 * preempted tasks to have normally scheduled. Because optprobe
-	 * may modify multiple instructions, there is a chance that Nth
-	 * instruction is preempted. In that case, such tasks can return
-	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
-	 * Note that on non-preemptive kernel, this is transparently converted
-	 * to synchronoze_sched() to wait for all interrupts to have completed.
-	 */
-	synchronize_rcu_tasks();
+		/*
+		 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
+		 * kprobes before waiting for quiesence period.
+		 */
+		do_unoptimize_kprobes();
 
-	/* Step 3: Optimize kprobes after quiesence period */
-	do_optimize_kprobes();
+		/*
+		 * Step 2: Wait for quiesence period to ensure all potentially
+		 * preempted tasks to have normally scheduled. Because optprobe
+		 * may modify multiple instructions, there is a chance that Nth
+		 * instruction is preempted. In that case, such tasks can return
+		 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
+		 * Note that on non-preemptive kernel, this is transparently converted
+		 * to synchronoze_sched() to wait for all interrupts to have completed.
+		 */
+		synchronize_rcu_tasks();
 
-	/* Step 4: Free cleaned kprobes after quiesence period */
-	do_free_cleaned_kprobes();
+		/* Step 3: Optimize kprobes after quiesence period */
+		do_optimize_kprobes();
 
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
+		/* Step 4: Free cleaned kprobes after quiesence period */
+		do_free_cleaned_kprobes();
+	}
 
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
-
-	mutex_unlock(&kprobe_mutex);
 }
 
 static void wait_for_kprobe_optimizer_locked(void)
@@ -853,29 +850,24 @@ static void try_to_optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* For preparing optimization, jump_label_text_reserved() is called. */
-	cpus_read_lock();
-	jump_label_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(jump_label_lock)();
+	guard(mutex)(&text_mutex);
 
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		goto out;
+		return;
 
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe. */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		goto out;
+		return;
 	}
 
 	init_aggr_kprobe(ap, p);
 	optimize_kprobe(ap);	/* This just kicks optimizer thread. */
-
-out:
-	mutex_unlock(&text_mutex);
-	jump_label_unlock();
-	cpus_read_unlock();
 }
 
 static void optimize_all_kprobes(void)
@@ -1158,12 +1150,9 @@ static int arm_kprobe(struct kprobe *kp)
 	if (unlikely(kprobe_ftrace(kp)))
 		return arm_kprobe_ftrace(kp);
 
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&text_mutex);
 	__arm_kprobe(kp);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-
 	return 0;
 }
 
@@ -1172,12 +1161,9 @@ static int disarm_kprobe(struct kprobe *kp, bool reopt)
 	if (unlikely(kprobe_ftrace(kp)))
 		return disarm_kprobe_ftrace(kp);
 
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&text_mutex);
 	__disarm_kprobe(kp, reopt);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-
 	return 0;
 }
 
@@ -1294,62 +1280,55 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
-	cpus_read_lock();
-
-	/* For preparing optimization, jump_label_text_reserved() is called */
-	jump_label_lock();
-	mutex_lock(&text_mutex);
-
-	if (!kprobe_aggrprobe(orig_p)) {
-		/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
-		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap) {
-			ret = -ENOMEM;
-			goto out;
+	scoped_guard(cpus_read_lock) {
+		/* For preparing optimization, jump_label_text_reserved() is called */
+		guard(jump_label_lock)();
+		guard(mutex)(&text_mutex);
+
+		if (!kprobe_aggrprobe(orig_p)) {
+			/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
+			ap = alloc_aggr_kprobe(orig_p);
+			if (!ap)
+				return -ENOMEM;
+			init_aggr_kprobe(ap, orig_p);
+		} else if (kprobe_unused(ap)) {
+			/* This probe is going to die. Rescue it */
+			ret = reuse_unused_kprobe(ap);
+			if (ret)
+				return ret;
 		}
-		init_aggr_kprobe(ap, orig_p);
-	} else if (kprobe_unused(ap)) {
-		/* This probe is going to die. Rescue it */
-		ret = reuse_unused_kprobe(ap);
-		if (ret)
-			goto out;
-	}
 
-	if (kprobe_gone(ap)) {
-		/*
-		 * Attempting to insert new probe at the same location that
-		 * had a probe in the module vaddr area which already
-		 * freed. So, the instruction slot has already been
-		 * released. We need a new slot for the new probe.
-		 */
-		ret = arch_prepare_kprobe(ap);
-		if (ret)
+		if (kprobe_gone(ap)) {
 			/*
-			 * Even if fail to allocate new slot, don't need to
-			 * free the 'ap'. It will be used next time, or
-			 * freed by unregister_kprobe().
+			 * Attempting to insert new probe at the same location that
+			 * had a probe in the module vaddr area which already
+			 * freed. So, the instruction slot has already been
+			 * released. We need a new slot for the new probe.
 			 */
-			goto out;
+			ret = arch_prepare_kprobe(ap);
+			if (ret)
+				/*
+				 * Even if fail to allocate new slot, don't need to
+				 * free the 'ap'. It will be used next time, or
+				 * freed by unregister_kprobe().
+				 */
+				return ret;
 
-		/* Prepare optimized instructions if possible. */
-		prepare_optimized_kprobe(ap);
+			/* Prepare optimized instructions if possible. */
+			prepare_optimized_kprobe(ap);
 
-		/*
-		 * Clear gone flag to prevent allocating new slot again, and
-		 * set disabled flag because it is not armed yet.
-		 */
-		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
-			    | KPROBE_FLAG_DISABLED;
-	}
-
-	/* Copy the insn slot of 'p' to 'ap'. */
-	copy_kprobe(ap, p);
-	ret = add_new_kprobe(ap, p);
+			/*
+			 * Clear gone flag to prevent allocating new slot again, and
+			 * set disabled flag because it is not armed yet.
+			 */
+			ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+				    | KPROBE_FLAG_DISABLED;
+		}
 
-out:
-	mutex_unlock(&text_mutex);
-	jump_label_unlock();
-	cpus_read_unlock();
+		/* Copy the insn slot of 'p' to 'ap'. */
+		copy_kprobe(ap, p);
+		ret = add_new_kprobe(ap, p);
+	}
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
@@ -1559,26 +1538,23 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	ret = check_ftrace_location(p);
 	if (ret)
 		return ret;
-	jump_label_lock();
+
+	guard(jump_label_lock)();
 
 	/* Ensure the address is in a text area, and find a module if exists. */
 	*probed_mod = NULL;
 	if (!core_kernel_text((unsigned long) p->addr)) {
 		guard(preempt)();
 		*probed_mod = __module_text_address((unsigned long) p->addr);
-		if (!(*probed_mod)) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (!(*probed_mod))
+			return -EINVAL;
 
 		/*
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
 		 */
-		if (unlikely(!try_module_get(*probed_mod))) {
-			ret = -ENOENT;
-			goto out;
-		}
+		if (unlikely(!try_module_get(*probed_mod)))
+			return -ENOENT;
 	}
 	/* Ensure it is not in reserved area. */
 	if (in_gate_area_no_mm((unsigned long) p->addr) ||
@@ -1588,8 +1564,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	    find_bug((unsigned long)p->addr) ||
 	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
 		module_put(*probed_mod);
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	/* Get module refcount and reject __init functions for loaded modules. */
@@ -1601,14 +1576,11 @@ static int check_kprobe_address_safe(struct kprobe *p,
 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
 		    !module_is_coming(*probed_mod)) {
 			module_put(*probed_mod);
-			ret = -ENOENT;
+			return -ENOENT;
 		}
 	}
 
-out:
-	jump_label_unlock();
-
-	return ret;
+	return 0;
 }
 
 static int __register_kprobe(struct kprobe *p)
@@ -1623,14 +1595,13 @@ static int __register_kprobe(struct kprobe *p)
 		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
 		return register_aggr_kprobe(old_p, p);
 
-	cpus_read_lock();
-	/* Prevent text modification */
-	mutex_lock(&text_mutex);
-	ret = prepare_kprobe(p);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-	if (ret)
-		return ret;
+	scoped_guard(cpus_read_lock) {
+		/* Prevent text modification */
+		guard(mutex)(&text_mutex);
+		ret = prepare_kprobe(p);
+		if (ret)
+			return ret;
+	}
 
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,