
Commit a926d09

Authored by Kumar Kartikeya Dwivedi (kkdwivedi), committed by Alexei Starovoitov
rqspinlock: Drop PV and virtualization support
Changes to rqspinlock in subsequent commits will be algorithmic modifications, which won't remain in agreement with the implementations of paravirt spinlock and virt_spin_lock support. These future changes include measures for terminating waiting loops in the slow path after a certain point. While using a fair lock like qspinlock directly inside virtual machines leads to suboptimal performance under certain conditions, we cannot use the existing virtualization support before we make it resilient as well. Therefore, drop it for now.

Note that we need to drop qspinlock_stat.h, as it's only relevant in case of CONFIG_PARAVIRT_SPINLOCKS=y, but we need to keep lock_events.h in the includes, which was indirectly pulled in before.

Reviewed-by: Barret Rhoden <brho@google.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-7-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 30ff133 commit a926d09
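The "measures for terminating waiting loops" referred to in the commit message arrive later in the series; purely as an illustration of the idea, a waiter could compare its elapsed time against a budget along the following lines. The helper name, headers, and timeout value below are hypothetical and not taken from the series.

#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/timekeeping.h>

/* Hypothetical wait budget; the real series defines its own policy. */
#define RES_SKETCH_TIMEOUT_NS	(250 * NSEC_PER_MSEC)

/* Illustrative helper: has this waiter spun past its budget? */
static inline bool res_sketch_wait_expired(u64 start_ns)
{
	return ktime_get_mono_fast_ns() - start_ns > RES_SKETCH_TIMEOUT_NS;
}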

File tree: 1 file changed (+1, -90 lines)

kernel/bpf/rqspinlock.c

Lines changed: 1 addition & 90 deletions
@@ -11,8 +11,6 @@
  * Peter Zijlstra <peterz@infradead.org>
  */
 
-#ifndef _GEN_PV_LOCK_SLOWPATH
-
 #include <linux/smp.h>
 #include <linux/bug.h>
 #include <linux/cpumask.h>
@@ -29,7 +27,7 @@
  * Include queued spinlock definitions and statistics code
  */
 #include "../locking/qspinlock.h"
-#include "../locking/qspinlock_stat.h"
+#include "../locking/lock_events.h"
 
 /*
  * The basic principle of a queue-based spinlock can best be understood
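The switch to lock_events.h above keeps lockevent_inc() available for the lock_slowpath event counted later in the diff. Conceptually, that facility is a per-CPU counter bump that compiles away when CONFIG_LOCK_EVENT_COUNTS is disabled; the sketch below uses made-up names and is not the header's actual contents.

#include <linux/percpu.h>

/* Sketch of a lock_events.h-style per-CPU event counter. */
enum sketch_lock_events {
	SKETCH_LOCKEVENT_lock_slowpath,
	SKETCH_LOCKEVENT_NUM,
};

#ifdef CONFIG_LOCK_EVENT_COUNTS
DECLARE_PER_CPU(unsigned long, sketch_lockevents[SKETCH_LOCKEVENT_NUM]);
#define sketch_lockevent_inc(ev) \
	this_cpu_inc(sketch_lockevents[SKETCH_LOCKEVENT_ ## ev])
#else
#define sketch_lockevent_inc(ev) do { } while (0)
#endif

A call such as sketch_lockevent_inc(lock_slowpath) then mirrors the lockevent_inc(lock_slowpath) call that remains in the slow path below.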
@@ -75,38 +73,9 @@
  * contexts: task, softirq, hardirq, nmi.
  *
  * Exactly fits one 64-byte cacheline on a 64-bit architecture.
- *
- * PV doubles the storage and uses the second cacheline for PV state.
  */
 static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);
 
-/*
- * Generate the native code for resilient_queued_spin_unlock_slowpath(); provide NOPs
- * for all the PV callbacks.
- */
-
-static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
-					   struct mcs_spinlock *prev) { }
-static __always_inline void __pv_kick_node(struct qspinlock *lock,
-					   struct mcs_spinlock *node) { }
-static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
-						  struct mcs_spinlock *node)
-						  { return 0; }
-
-#define pv_enabled()		false
-
-#define pv_init_node		__pv_init_node
-#define pv_wait_node		__pv_wait_node
-#define pv_kick_node		__pv_kick_node
-#define pv_wait_head_or_lock	__pv_wait_head_or_lock
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define resilient_queued_spin_lock_slowpath	native_resilient_queued_spin_lock_slowpath
-#endif
-
-#endif /* _GEN_PV_LOCK_SLOWPATH */
-
 /**
  * resilient_queued_spin_lock_slowpath - acquire the queued spinlock
  * @lock: Pointer to queued spinlock structure
@@ -136,12 +105,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
-	if (pv_enabled())
-		goto pv_queue;
-
-	if (virt_spin_lock(lock))
-		return;
-
 	/*
 	 * Wait for in-progress pending->locked hand-overs with a bounded
 	 * number of spins so that we guarantee forward progress.
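The virt_spin_lock() call removed above is the unfair test-and-set fallback used when running under a hypervisor without paravirt spinlock support, since a strictly fair queued lock behaves poorly when the holder's vCPU gets preempted. Below is a simplified sketch of that style of fallback, not the architecture implementation itself, assuming a qspinlock-style atomic lock word.

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/processor.h>

/* Sketch: degrade to an unfair test-and-set lock inside a guest. */
static bool sketch_virt_spin_lock(atomic_t *val)
{
	for (;;) {
		/* Wait until the lock word looks free. */
		while (atomic_read(val))
			cpu_relax();
		/* Then try to grab it; unfair, but tolerant of preempted holders. */
		if (atomic_cmpxchg(val, 0, 1) == 0)
			return true;
	}
}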
@@ -212,7 +175,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 */
 queue:
 	lockevent_inc(lock_slowpath);
-pv_queue:
 	node = this_cpu_ptr(&rqnodes[0].mcs);
 	idx = node->count++;
 	tail = encode_tail(smp_processor_id(), idx);
@@ -251,7 +213,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 
 	node->locked = 0;
 	node->next = NULL;
-	pv_init_node(node);
 
 	/*
 	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
@@ -288,7 +249,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 		/* Link @node into the waitqueue. */
 		WRITE_ONCE(prev->next, node);
 
-		pv_wait_node(node, prev);
 		arch_mcs_spin_lock_contended(&node->locked);
 
 		/*
@@ -312,23 +272,9 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 * store-release that clears the locked bit and create lock
 	 * sequentiality; this is because the set_locked() function below
 	 * does not imply a full barrier.
-	 *
-	 * The PV pv_wait_head_or_lock function, if active, will acquire
-	 * the lock and return a non-zero value. So we have to skip the
-	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
-	 * been designated yet, there is no way for the locked value to become
-	 * _Q_SLOW_VAL. So both the set_locked() and the
-	 * atomic_cmpxchg_relaxed() calls will be safe.
-	 *
-	 * If PV isn't active, 0 will be returned instead.
-	 *
 	 */
-	if ((val = pv_wait_head_or_lock(lock, node)))
-		goto locked;
-
 	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
 
-locked:
 	/*
 	 * claim the lock:
 	 *
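With the pv_wait_head_or_lock() escape hatch gone, the queue head always reaches the atomic_cond_read_acquire() wait above. Against the file's existing includes and lock type, that wait behaves roughly like the loop below; this is an illustration only, since the real macro spins with a relaxed read, applies acquire ordering once the condition is observed, and may use a more efficient architecture-specific wait primitive.

/* Illustrative stand-in for:
 *   val = atomic_cond_read_acquire(&lock->val,
 *                                  !(VAL & _Q_LOCKED_PENDING_MASK));
 */
static u32 sketch_wait_for_owner_and_pending(rqspinlock_t *lock)
{
	u32 val;

	while ((val = atomic_read_acquire(&lock->val)) & _Q_LOCKED_PENDING_MASK)
		cpu_relax();

	return val;
}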
@@ -341,11 +287,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	 */
 
 	/*
-	 * In the PV case we might already have _Q_LOCKED_VAL set, because
-	 * of lock stealing; therefore we must also allow:
-	 *
-	 * n,0,1 -> 0,0,1
-	 *
 	 * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
 	 * above wait condition, therefore any concurrent setting of
 	 * PENDING will make the uncontended transition fail.
@@ -369,7 +310,6 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 		next = smp_cond_load_relaxed(&node->next, (VAL));
 
 	arch_mcs_spin_unlock_contended(&next->locked);
-	pv_kick_node(lock, next);
 
 release:
 	trace_contention_end(lock, 0);
@@ -380,32 +320,3 @@ void __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	__this_cpu_dec(rqnodes[0].mcs.count);
 }
 EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
-
-/*
- * Generate the paravirt code for resilient_queued_spin_unlock_slowpath().
- */
-#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-#define _GEN_PV_LOCK_SLOWPATH
-
-#undef pv_enabled
-#define pv_enabled()	true
-
-#undef pv_init_node
-#undef pv_wait_node
-#undef pv_kick_node
-#undef pv_wait_head_or_lock
-
-#undef resilient_queued_spin_lock_slowpath
-#define resilient_queued_spin_lock_slowpath	__pv_resilient_queued_spin_lock_slowpath
-
-#include "../locking/qspinlock_paravirt.h"
-#include "rqspinlock.c"
-
-bool nopvspin;
-static __init int parse_nopvspin(char *arg)
-{
-	nopvspin = true;
-	return 0;
-}
-early_param("nopvspin", parse_nopvspin);
-#endif
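The #ifndef _GEN_PV_LOCK_SLOWPATH guard dropped in the first hunk and the block dropped here were two halves of one mechanism: the file included itself a second time with _GEN_PV_LOCK_SLOWPATH defined, so the shared slow-path body was compiled once with native no-op hooks and once with the paravirt hooks from qspinlock_paravirt.h. A minimal, self-contained illustration of that self-include trick follows; the file name and symbols are hypothetical, not the removed code.

/* double_compile.c - standalone sketch of the self-include pattern. */
#include <stdio.h>

#ifndef GEN_PV_VARIANT
/* First pass: native hooks. */
#define hook()		printf("native hook\n")
#define slowpath_name	native_slowpath
#endif /* GEN_PV_VARIANT */

/* The shared "algorithm"; only the hook macros differ between passes. */
static void slowpath_name(void)
{
	hook();
}

#ifndef GEN_PV_VARIANT
#define GEN_PV_VARIANT

/* Second pass: swap in paravirt hooks and re-include this file. */
#undef hook
#undef slowpath_name
#define hook()		printf("paravirt hook\n")
#define slowpath_name	pv_slowpath
#include "double_compile.c"

int main(void)
{
	native_slowpath();	/* emitted by the first pass  */
	pv_slowpath();		/* emitted by the second pass */
	return 0;
}
#endif

Building double_compile.c as an ordinary C file emits both native_slowpath() and pv_slowpath() from the single definition above, which is the kind of sharing this commit gives up in exchange for a simpler, PV-free slow path.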
