@@ -11,8 +11,6 @@
  * Peter Zijlstra <peterz@infradead.org>
  */

-#ifndef _GEN_PV_LOCK_SLOWPATH
-
 #include <linux/smp.h>
 #include <linux/bug.h>
 #include <linux/cpumask.h>
@@ -29,7 +27,7 @@
  * Include queued spinlock definitions and statistics code
  */
 #include "../locking/qspinlock.h"
-#include "../locking/qspinlock_stat.h"
+#include "../locking/lock_events.h"

 /*
  * The basic principle of a queue-based spinlock can best be understood
@@ -75,38 +73,9 @@
  * contexts: task, softirq, hardirq, nmi.
  *
  * Exactly fits one 64-byte cacheline on a 64-bit architecture.
- *
- * PV doubles the storage and uses the second cacheline for PV state.
  */
 static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);

-/*
- * Generate the native code for resilient_queued_spin_unlock_slowpath(); provide NOPs
- * for all the PV callbacks.
- */
-
-static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
-					   struct mcs_spinlock *prev) { }
-static __always_inline void __pv_kick_node(struct qspinlock *lock,
-					   struct mcs_spinlock *node) { }
-static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
-						  struct mcs_spinlock *node)
-						  { return 0; }
-
-#define pv_enabled()		false
-
-#define pv_init_node		__pv_init_node
-#define pv_wait_node		__pv_wait_node
-#define pv_kick_node		__pv_kick_node
-#define pv_wait_head_or_lock	__pv_wait_head_or_lock
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define resilient_queued_spin_lock_slowpath	native_resilient_queued_spin_lock_slowpath
-#endif
-
-#endif /* _GEN_PV_LOCK_SLOWPATH */
-
 /**
  * resilient_queued_spin_lock_slowpath - acquire the queued spinlock
  * @lock: Pointer to queued spinlock structure
@@ -136,12 +105,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)

 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

-	if (pv_enabled())
-		goto pv_queue;
-
-	if (virt_spin_lock(lock))
-		return;
-
 	/*
 	 * Wait for in-progress pending->locked hand-overs with a bounded
 	 * number of spins so that we guarantee forward progress.
@@ -212,7 +175,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 */
 queue:
 	lockevent_inc(lock_slowpath);
-pv_queue:
 	node = this_cpu_ptr(&rqnodes[0].mcs);
 	idx = node->count++;
 	tail = encode_tail(smp_processor_id(), idx);
@@ -251,7 +213,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)

 	node->locked = 0;
 	node->next = NULL;
-	pv_init_node(node);

 	/*
 	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -288,7 +249,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	/* Link @node into the waitqueue. */
 	WRITE_ONCE(prev->next, node);

-	pv_wait_node(node, prev);
 	arch_mcs_spin_lock_contended(&node->locked);

 	/*
@@ -312,23 +272,9 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 * store-release that clears the locked bit and create lock
 	 * sequentiality; this is because the set_locked() function below
 	 * does not imply a full barrier.
-	 *
-	 * The PV pv_wait_head_or_lock function, if active, will acquire
-	 * the lock and return a non-zero value. So we have to skip the
-	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
-	 * been designated yet, there is no way for the locked value to become
-	 * _Q_SLOW_VAL. So both the set_locked() and the
-	 * atomic_cmpxchg_relaxed() calls will be safe.
-	 *
-	 * If PV isn't active, 0 will be returned instead.
-	 *
 	 */
-	if ((val = pv_wait_head_or_lock(lock, node)))
-		goto locked;
-
 	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));

-locked:
 	/*
 	 * claim the lock:
 	 *
@@ -341,11 +287,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 */

 	/*
-	 * In the PV case we might already have _Q_LOCKED_VAL set, because
-	 * of lock stealing; therefore we must also allow:
-	 *
-	 * n,0,1 -> 0,0,1
-	 *
 	 * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
 	 * above wait condition, therefore any concurrent setting of
 	 * PENDING will make the uncontended transition fail.
@@ -369,7 +310,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	next = smp_cond_load_relaxed(&node->next, (VAL));

 	arch_mcs_spin_unlock_contended(&next->locked);
-	pv_kick_node(lock, next);

 release:
 	trace_contention_end(lock, 0);
@@ -380,32 +320,3 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	__this_cpu_dec(rqnodes[0].mcs.count);
 }
 EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
-
-/*
- * Generate the paravirt code for resilient_queued_spin_unlock_slowpath().
- */
-#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-#define _GEN_PV_LOCK_SLOWPATH
-
-#undef pv_enabled
-#define pv_enabled()	true
-
-#undef pv_init_node
-#undef pv_wait_node
-#undef pv_kick_node
-#undef pv_wait_head_or_lock
-
-#undef resilient_queued_spin_lock_slowpath
-#define resilient_queued_spin_lock_slowpath	__pv_resilient_queued_spin_lock_slowpath
-
-#include "../locking/qspinlock_paravirt.h"
-#include "rqspinlock.c"
-
-bool nopvspin;
-static __init int parse_nopvspin(char *arg)
-{
-	nopvspin = true;
-	return 0;
-}
-early_param("nopvspin", parse_nopvspin);
-#endif
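
For context on the queue: path this patch keeps: each CPU owns a small array of MCS nodes (one slot per nesting context: task, softirq, hardirq, NMI, per the comment retained above), and a waiter advertises itself by packing its CPU id and slot index into a non-zero tail word via encode_tail(). Below is a minimal user-space sketch of that indexing idea; the names and constants (mcs_node_sketch, MAX_NODES, encode_tail_sketch, the cpu+1 bias) are illustrative assumptions, not the kernel's definitions.

/*
 * Illustrative sketch only -- not code from this patch.
 */
#include <stdio.h>

#define NR_SKETCH_CPUS 2   /* assumed tiny "machine" for the demo */
#define MAX_NODES      4   /* one slot per context: task, softirq, hardirq, NMI */
#define TAIL_IDX_BITS  2   /* enough bits to hold an index below MAX_NODES */

struct mcs_node_sketch {
	int locked;        /* set when this waiter is granted the lock */
	int count;         /* nesting depth on this "CPU" */
};

/* Per-CPU node pool; the kernel keeps this in per-CPU data (rqnodes). */
static struct mcs_node_sketch nodes[NR_SKETCH_CPUS][MAX_NODES];

/* Pack (cpu, idx) into a non-zero tail word; 0 means "no queue tail". */
static unsigned int encode_tail_sketch(int cpu, int idx)
{
	return ((unsigned int)(cpu + 1) << TAIL_IDX_BITS) | (unsigned int)idx;
}

int main(void)
{
	int cpu = 1;
	/* Mirrors "idx = node->count++" from the queue: path in the patch. */
	int idx = nodes[cpu][0].count++;
	unsigned int tail = encode_tail_sketch(cpu, idx);

	printf("cpu=%d idx=%d tail=0x%x\n", cpu, idx, tail);
	return 0;
}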