@@ -2506,94 +2506,29 @@ static void fs_reclaim_tests(void)
 	pr_cont("\n");
 }
 
-#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+/* Defines guard classes to create contexts */
+DEFINE_LOCK_GUARD_0(HARDIRQ, HARDIRQ_ENTER(), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(NOTTHREADED_HARDIRQ,
+	do {
+		local_irq_disable();
+		__irq_enter();
+		WARN_ON(!in_irq());
+	} while (0), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(SOFTIRQ, SOFTIRQ_ENTER(), SOFTIRQ_EXIT())
+
+/* Define RCU guards, should go away when RCU has its own guard definitions */
+DEFINE_LOCK_GUARD_0(RCU, rcu_read_lock(), rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(RCU_BH, rcu_read_lock_bh(), rcu_read_unlock_bh())
+DEFINE_LOCK_GUARD_0(RCU_SCHED, rcu_read_lock_sched(), rcu_read_unlock_sched())
 
-static void hardirq_exit(int *_)
-{
-	HARDIRQ_EXIT();
-}
-
-#define HARDIRQ_CONTEXT(name, ...)					\
-	int hardirq_guard_##name __guard(hardirq_exit);			\
-	HARDIRQ_ENTER();
-
-#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...)				\
-	int notthreaded_hardirq_guard_##name __guard(hardirq_exit);	\
-	local_irq_disable();						\
-	__irq_enter();							\
-	WARN_ON(!in_irq());
-
-static void softirq_exit(int *_)
-{
-	SOFTIRQ_EXIT();
-}
-
-#define SOFTIRQ_CONTEXT(name, ...)					\
-	int softirq_guard_##name __guard(softirq_exit);			\
-	SOFTIRQ_ENTER();
-
-static void rcu_exit(int *_)
-{
-	rcu_read_unlock();
-}
-
-#define RCU_CONTEXT(name, ...)						\
-	int rcu_guard_##name __guard(rcu_exit);				\
-	rcu_read_lock();
-
-static void rcu_bh_exit(int *_)
-{
-	rcu_read_unlock_bh();
-}
-
-#define RCU_BH_CONTEXT(name, ...)					\
-	int rcu_bh_guard_##name __guard(rcu_bh_exit);			\
-	rcu_read_lock_bh();
-
-static void rcu_sched_exit(int *_)
-{
-	rcu_read_unlock_sched();
-}
-
-#define RCU_SCHED_CONTEXT(name, ...)					\
-	int rcu_sched_guard_##name __guard(rcu_sched_exit);		\
-	rcu_read_lock_sched();
-
-static void raw_spinlock_exit(raw_spinlock_t **lock)
-{
-	raw_spin_unlock(*lock);
-}
-
-#define RAW_SPINLOCK_CONTEXT(name, lock)				\
-	raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock);	\
-	raw_spin_lock(&(lock));
-
-static void spinlock_exit(spinlock_t **lock)
-{
-	spin_unlock(*lock);
-}
-
-#define SPINLOCK_CONTEXT(name, lock)					\
-	spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock);	\
-	spin_lock(&(lock));
-
-static void mutex_exit(struct mutex **lock)
-{
-	mutex_unlock(*lock);
-}
-
-#define MUTEX_CONTEXT(name, lock)					\
-	struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock);	\
-	mutex_lock(&(lock));
 
 #define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock)	\
 									\
 static void __maybe_unused inner##_in_##outer(void)			\
 {									\
-	outer##_CONTEXT(_, outer_lock);					\
-	{								\
-		inner##_CONTEXT(_, inner_lock);				\
-	}								\
+	/* Relies on the reversed clean-up ordering: inner first */	\
+	guard(outer)(outer_lock);					\
+	guard(inner)(inner_lock);					\
 }
 
 /*
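For readers who have not met the guard() infrastructure before: DEFINE_LOCK_GUARD_0() and guard() come from the kernel's scope-based cleanup helpers in <linux/cleanup.h>, which are built on the same compiler cleanup attribute that the deleted __guard() macro used directly. Below is a minimal userspace sketch of that idea only; the MY_*/my_* macros and the printf() "locks" are invented for illustration and are not the kernel's implementation. The property the new GENERATE_2_CONTEXT_TESTCASE() body relies on is that cleanup handlers run in reverse declaration order, so the inner guard is released before the outer one.

/*
 * Userspace sketch of guard classes (illustration only, not <linux/cleanup.h>).
 */
#include <stdio.h>

/* Define a guard class: the "lock" runs in the constructor, the "unlock" in
 * the destructor, which the compiler invokes automatically at scope exit. */
#define MY_DEFINE_GUARD_0(name, enter_stmt, exit_stmt)			\
	typedef struct { int unused; } my_guard_##name##_t;		\
	static inline my_guard_##name##_t my_guard_##name##_ctor(void)	\
	{ enter_stmt; return (my_guard_##name##_t){ 0 }; }		\
	static inline void my_guard_##name##_dtor(my_guard_##name##_t *g) \
	{ (void)g; exit_stmt; }

/* Unlike the kernel's guard(), no unique-name magic: one use per scope. */
#define my_guard(name)							\
	my_guard_##name##_t						\
	__attribute__((cleanup(my_guard_##name##_dtor), unused))	\
	my_guard_var_##name = my_guard_##name##_ctor()

MY_DEFINE_GUARD_0(OUTER, printf("enter outer\n"), printf("exit outer\n"))
MY_DEFINE_GUARD_0(INNER, printf("enter inner\n"), printf("exit inner\n"))

int main(void)
{
	my_guard(OUTER);
	my_guard(INNER);
	printf("body\n");
	return 0;	/* destructors run here: inner first, then outer */
}

Running this prints "exit inner" before "exit outer", which mirrors the "inner first" comment in the hunk above.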
@@ -2632,38 +2567,38 @@ GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
 GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock)			\
 GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock)		\
 GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock)		\
-GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock)	\
-GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
+GENERATE_2_CONTEXT_TESTCASE(raw_spinlock, &raw_lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(spinlock, &lock_A, inner, inner_lock)	\
+GENERATE_2_CONTEXT_TESTCASE(mutex, &mutex_A, inner, inner_lock)
 
 GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(raw_spinlock, &raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(spinlock, &lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(mutex, &mutex_B)
 
 /* the outer context allows all kinds of preemption */
 #define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer)			\
 	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
-	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
-	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
-	dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX);		\
+	dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
+	dotest(mutex_in_##outer, SUCCESS, LOCKTYPE_MUTEX);		\
 
 /*
  * the outer context only allows the preemption introduced by spinlock_t (which
  * is a sleepable lock for PREEMPT_RT)
  */
 #define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer)		\
 	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
-	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
-	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
-	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
+	dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
+	dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
 
 /* the outer doesn't allow any kind of preemption */
 #define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer)		\
 	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
-	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
-	dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN);		\
-	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
+	dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(spinlock_in_##outer, FAILURE, LOCKTYPE_SPIN);		\
+	dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
 
 static void wait_context_tests(void)
 {
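To make the generated test matrix concrete, here is a hand expansion of one instantiation, derived only from the macros shown in this diff (it is not preprocessor output, and the trailing comments are explanatory). GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(mutex, &mutex_B) includes GENERATE_2_CONTEXT_TESTCASE(RCU, , mutex, &mutex_B), which expands to roughly:

static void __maybe_unused mutex_in_RCU(void)
{
	/* Relies on the reversed clean-up ordering: inner first */
	guard(RCU)();			/* enter an RCU read-side critical section */
	guard(mutex)(&mutex_B);		/* acquire mutex_B */
}	/* scope exit: mutex_B is released first, then the RCU section ends */

The dotest() wrappers in the DO_CONTEXT_TESTCASE_* macros above then check whether lockdep's wait-context tracking accepts or rejects each such nesting. Note that the lock-based guard classes are renamed to the lower-case raw_spinlock, spinlock and mutex and are no longer defined in this file; they are expected to come from the generic lock guard definitions, which take a pointer to the lock, hence the call sites now pass &raw_lock_A, &lock_A and &mutex_A rather than the bare lock names.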
@@ -2697,15 +2632,15 @@ static void wait_context_tests(void)
 	pr_cont("\n");
 
 	print_testname("in RAW_SPINLOCK context");
-	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
+	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(raw_spinlock);
 	pr_cont("\n");
 
 	print_testname("in SPINLOCK context");
-	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
+	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(spinlock);
 	pr_cont("\n");
 
 	print_testname("in MUTEX context");
-	DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
+	DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(mutex);
 	pr_cont("\n");
 }
 
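Similarly, the calls in wait_context_tests() can be expanded by hand from the DO_CONTEXT_TESTCASE_* macros in the previous hunk. For example, DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(mutex) becomes (hand-derived, comment added):

	/* a mutex as the outer context tolerates any kind of inner waiter */
	dotest(RCU_in_mutex, SUCCESS, LOCKTYPE_RWLOCK);
	dotest(raw_spinlock_in_mutex, SUCCESS, LOCKTYPE_SPIN);
	dotest(spinlock_in_mutex, SUCCESS, LOCKTYPE_SPIN);
	dotest(mutex_in_mutex, SUCCESS, LOCKTYPE_MUTEX);

By contrast, DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(raw_spinlock) expects the spinlock and mutex cases to FAIL, since under a raw spinlock the acquired lock may not sleep (and on PREEMPT_RT spinlock_t is a sleepable lock, as the comment in the previous hunk notes).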