@@ -596,41 +596,38 @@ static void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static void kprobe_optimizer(struct work_struct *work)
 {
-	mutex_lock(&kprobe_mutex);
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(mutex)(&kprobe_mutex);
 
-	/*
-	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
-	 * kprobes before waiting for quiesence period.
-	 */
-	do_unoptimize_kprobes();
+	scoped_guard(cpus_read_lock) {
+		guard(mutex)(&text_mutex);
 
-	/*
-	 * Step 2: Wait for quiesence period to ensure all potentially
-	 * preempted tasks to have normally scheduled. Because optprobe
-	 * may modify multiple instructions, there is a chance that Nth
-	 * instruction is preempted. In that case, such tasks can return
-	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
-	 * Note that on non-preemptive kernel, this is transparently converted
-	 * to synchronoze_sched() to wait for all interrupts to have completed.
-	 */
-	synchronize_rcu_tasks();
+		/*
+		 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
+		 * kprobes before waiting for quiesence period.
+		 */
+		do_unoptimize_kprobes();
 
-	/* Step 3: Optimize kprobes after quiesence period */
-	do_optimize_kprobes();
+		/*
+		 * Step 2: Wait for quiesence period to ensure all potentially
+		 * preempted tasks to have normally scheduled. Because optprobe
+		 * may modify multiple instructions, there is a chance that Nth
+		 * instruction is preempted. In that case, such tasks can return
+		 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
+		 * Note that on non-preemptive kernel, this is transparently converted
+		 * to synchronoze_sched() to wait for all interrupts to have completed.
+		 */
+		synchronize_rcu_tasks();
 
-	/* Step 4: Free cleaned kprobes after quiesence period */
-	do_free_cleaned_kprobes();
+		/* Step 3: Optimize kprobes after quiesence period */
+		do_optimize_kprobes();
 
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
+		/* Step 4: Free cleaned kprobes after quiesence period */
+		do_free_cleaned_kprobes();
+	}
 
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
-
-	mutex_unlock(&kprobe_mutex);
 }
 
 static void wait_for_kprobe_optimizer_locked(void)
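Note (not part of the patch): guard() and scoped_guard() come from <linux/cleanup.h> and tie lock release to scope exit, which is why the explicit unlock calls and the trailing mutex_unlock(&kprobe_mutex) can disappear above. The standalone userspace sketch below only illustrates the underlying compiler feature, __attribute__((cleanup())), using a pthread mutex and a made-up MUTEX_GUARD() helper; it is not the kernel macro itself.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup handler: receives a pointer to the guarded variable. */
static void unlock_cleanup(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
}

/* Rough userspace analogue of guard(mutex)(&demo_lock). */
#define MUTEX_GUARD(name, lock)						\
	pthread_mutex_t *name						\
		__attribute__((cleanup(unlock_cleanup))) = (lock);	\
	pthread_mutex_lock(name)

static int do_work(int fail)
{
	MUTEX_GUARD(g, &demo_lock);

	if (fail)
		return -1;	/* unlock_cleanup() runs here automatically */

	printf("work done under lock\n");
	return 0;		/* ...and here */
}

int main(void)
{
	do_work(0);
	do_work(1);
	return 0;
}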
@@ -853,29 +850,24 @@ static void try_to_optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* For preparing optimization, jump_label_text_reserved() is called. */
-	cpus_read_lock();
-	jump_label_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(jump_label_lock)();
+	guard(mutex)(&text_mutex);
 
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		goto out;
+		return;
 
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe. */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		goto out;
+		return;
 	}
 
 	init_aggr_kprobe(ap, p);
 	optimize_kprobe(ap);	/* This just kicks optimizer thread. */
-
-out:
-	mutex_unlock(&text_mutex);
-	jump_label_unlock();
-	cpus_read_unlock();
 }
 
 static void optimize_all_kprobes(void)
@@ -1158,12 +1150,9 @@ static int arm_kprobe(struct kprobe *kp)
 	if (unlikely(kprobe_ftrace(kp)))
 		return arm_kprobe_ftrace(kp);
 
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&text_mutex);
 	__arm_kprobe(kp);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-
 	return 0;
 }
 
@@ -1172,12 +1161,9 @@ static int disarm_kprobe(struct kprobe *kp, bool reopt)
 	if (unlikely(kprobe_ftrace(kp)))
 		return disarm_kprobe_ftrace(kp);
 
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&text_mutex);
 	__disarm_kprobe(kp, reopt);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-
 	return 0;
 }
 
@@ -1294,62 +1280,55 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
-	cpus_read_lock();
-
-	/* For preparing optimization, jump_label_text_reserved() is called */
-	jump_label_lock();
-	mutex_lock(&text_mutex);
-
-	if (!kprobe_aggrprobe(orig_p)) {
-		/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
-		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap) {
-			ret = -ENOMEM;
-			goto out;
+	scoped_guard(cpus_read_lock) {
+		/* For preparing optimization, jump_label_text_reserved() is called */
+		guard(jump_label_lock)();
+		guard(mutex)(&text_mutex);
+
+		if (!kprobe_aggrprobe(orig_p)) {
+			/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
+			ap = alloc_aggr_kprobe(orig_p);
+			if (!ap)
+				return -ENOMEM;
+			init_aggr_kprobe(ap, orig_p);
+		} else if (kprobe_unused(ap)) {
+			/* This probe is going to die. Rescue it */
+			ret = reuse_unused_kprobe(ap);
+			if (ret)
+				return ret;
 		}
-		init_aggr_kprobe(ap, orig_p);
-	} else if (kprobe_unused(ap)) {
-		/* This probe is going to die. Rescue it */
-		ret = reuse_unused_kprobe(ap);
-		if (ret)
-			goto out;
-	}
 
-	if (kprobe_gone(ap)) {
-		/*
-		 * Attempting to insert new probe at the same location that
-		 * had a probe in the module vaddr area which already
-		 * freed. So, the instruction slot has already been
-		 * released. We need a new slot for the new probe.
-		 */
-		ret = arch_prepare_kprobe(ap);
-		if (ret)
+		if (kprobe_gone(ap)) {
 			/*
-			 * Even if fail to allocate new slot, don't need to
-			 * free the 'ap'. It will be used next time, or
-			 * freed by unregister_kprobe().
+			 * Attempting to insert new probe at the same location that
+			 * had a probe in the module vaddr area which already
+			 * freed. So, the instruction slot has already been
+			 * released. We need a new slot for the new probe.
 			 */
-			goto out;
+			ret = arch_prepare_kprobe(ap);
+			if (ret)
+				/*
+				 * Even if fail to allocate new slot, don't need to
+				 * free the 'ap'. It will be used next time, or
+				 * freed by unregister_kprobe().
+				 */
+				return ret;
 
-		/* Prepare optimized instructions if possible. */
-		prepare_optimized_kprobe(ap);
+			/* Prepare optimized instructions if possible. */
+			prepare_optimized_kprobe(ap);
 
-		/*
-		 * Clear gone flag to prevent allocating new slot again, and
-		 * set disabled flag because it is not armed yet.
-		 */
-		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
-			    | KPROBE_FLAG_DISABLED;
-	}
-
-	/* Copy the insn slot of 'p' to 'ap'. */
-	copy_kprobe(ap, p);
-	ret = add_new_kprobe(ap, p);
+			/*
+			 * Clear gone flag to prevent allocating new slot again, and
+			 * set disabled flag because it is not armed yet.
+			 */
+			ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+				    | KPROBE_FLAG_DISABLED;
+		}
 
-out:
-	mutex_unlock(&text_mutex);
-	jump_label_unlock();
-	cpus_read_unlock();
+		/* Copy the insn slot of 'p' to 'ap'. */
+		copy_kprobe(ap, p);
+		ret = add_new_kprobe(ap, p);
+	}
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
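Note (again, not part of the patch): the early return statements inside scoped_guard(cpus_read_lock) { ... } above are safe because the guard objects live in the scope the macro opens, so their cleanup handlers (cpus_read_unlock(), mutex_unlock(&text_mutex), jump_label_unlock()) still run before control leaves the function; that is what lets the out: label and unlock tail disappear. Below is a minimal userspace sketch of a one-shot for-loop guard with the same property, using assumed helper names rather than the kernel implementation.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
	printf("unlocked on scope exit\n");
}

/*
 * Rough userspace analogue of scoped_guard(mutex, &demo_lock) { ... }:
 * a for loop that runs its body exactly once while a cleanup-annotated
 * variable holds the lock for the lifetime of that scope.
 */
#define SCOPED_LOCK(lock)						\
	for (pthread_mutex_t *_g					\
		__attribute__((cleanup(unlock_cleanup))) =		\
			(pthread_mutex_lock(lock), (lock)),		\
	     *_once = (lock); _once; _once = NULL)

static int prepare(int fail)
{
	SCOPED_LOCK(&demo_lock) {
		if (fail)
			return -1;	/* cleanup still unlocks first */
		printf("prepared under lock\n");
	}
	return 0;			/* lock already dropped here */
}

int main(void)
{
	prepare(0);
	prepare(1);
	return 0;
}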
@@ -1559,26 +1538,23 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	ret = check_ftrace_location(p);
 	if (ret)
 		return ret;
-	jump_label_lock();
+
+	guard(jump_label_lock)();
 
 	/* Ensure the address is in a text area, and find a module if exists. */
 	*probed_mod = NULL;
 	if (!core_kernel_text((unsigned long) p->addr)) {
 		guard(preempt)();
 		*probed_mod = __module_text_address((unsigned long) p->addr);
-		if (!(*probed_mod)) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (!(*probed_mod))
+			return -EINVAL;
 
 		/*
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
 		 */
-		if (unlikely(!try_module_get(*probed_mod))) {
-			ret = -ENOENT;
-			goto out;
-		}
+		if (unlikely(!try_module_get(*probed_mod)))
+			return -ENOENT;
 	}
 	/* Ensure it is not in reserved area. */
 	if (in_gate_area_no_mm((unsigned long) p->addr) ||
@@ -1588,8 +1564,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	    find_bug((unsigned long)p->addr) ||
 	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
 		module_put(*probed_mod);
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	/* Get module refcount and reject __init functions for loaded modules. */
@@ -1601,14 +1576,11 @@ static int check_kprobe_address_safe(struct kprobe *p,
 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
 		    !module_is_coming(*probed_mod)) {
 			module_put(*probed_mod);
-			ret = -ENOENT;
+			return -ENOENT;
 		}
 	}
 
-out:
-	jump_label_unlock();
-
-	return ret;
+	return 0;
 }
 
 static int __register_kprobe(struct kprobe *p)
@@ -1623,14 +1595,13 @@ static int __register_kprobe(struct kprobe *p)
 		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
 		return register_aggr_kprobe(old_p, p);
 
-	cpus_read_lock();
-	/* Prevent text modification */
-	mutex_lock(&text_mutex);
-	ret = prepare_kprobe(p);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-	if (ret)
-		return ret;
+	scoped_guard(cpus_read_lock) {
+		/* Prevent text modification */
+		guard(mutex)(&text_mutex);
+		ret = prepare_kprobe(p);
+		if (ret)
+			return ret;
+	}
 
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,