Skip to content

Commit ecbd804

Browse files
kkdwivedi authored and Alexei Starovoitov committed
rqspinlock: Add basic support for CONFIG_PARAVIRT
We ripped out PV and virtualization related bits from rqspinlock in an earlier commit, however, a fair lock performs poorly within a virtual machine when the lock holder is preempted. As such, retain the virt_spin_lock fallback to test and set lock, but with timeout and deadlock detection. We can do this by simply depending on the resilient_tas_spin_lock implementation from the previous patch. We don't integrate support for CONFIG_PARAVIRT_SPINLOCKS yet, as that requires more involved algorithmic changes and introduces more complexity. It can be done when the need arises in the future. Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Link: https://lore.kernel.org/r/20250316040541.108729-15-memxor@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent c9102a6 commit ecbd804

File tree

3 files changed

+50
-0
lines changed

3 files changed

+50
-0
lines changed

arch/x86/include/asm/rqspinlock.h

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
#ifndef _ASM_X86_RQSPINLOCK_H
3+
#define _ASM_X86_RQSPINLOCK_H
4+
5+
#include <asm/paravirt.h>
6+
7+
#ifdef CONFIG_PARAVIRT
8+
DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
9+
10+
#define resilient_virt_spin_lock_enabled resilient_virt_spin_lock_enabled
11+
static __always_inline bool resilient_virt_spin_lock_enabled(void)
12+
{
13+
return static_branch_likely(&virt_spin_lock_key);
14+
}
15+
16+
#ifdef CONFIG_QUEUED_SPINLOCKS
17+
typedef struct qspinlock rqspinlock_t;
18+
#else
19+
typedef struct rqspinlock rqspinlock_t;
20+
#endif
21+
extern int resilient_tas_spin_lock(rqspinlock_t *lock);
22+
23+
#define resilient_virt_spin_lock resilient_virt_spin_lock
24+
static inline int resilient_virt_spin_lock(rqspinlock_t *lock)
25+
{
26+
return resilient_tas_spin_lock(lock);
27+
}
28+
29+
#endif /* CONFIG_PARAVIRT */
30+
31+
#include <asm-generic/rqspinlock.h>
32+
33+
#endif /* _ASM_X86_RQSPINLOCK_H */

include/asm-generic/rqspinlock.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,20 @@ extern int resilient_tas_spin_lock(rqspinlock_t *lock);
3535
extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);
3636
#endif
3737

38+
#ifndef resilient_virt_spin_lock_enabled
39+
static __always_inline bool resilient_virt_spin_lock_enabled(void)
40+
{
41+
return false;
42+
}
43+
#endif
44+
45+
#ifndef resilient_virt_spin_lock
46+
static __always_inline int resilient_virt_spin_lock(rqspinlock_t *lock)
47+
{
48+
return 0;
49+
}
50+
#endif
51+
3852
/*
3953
* Default timeout for waiting loops is 0.25 seconds
4054
*/

kernel/bpf/rqspinlock.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -352,6 +352,9 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
352352

353353
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
354354

355+
if (resilient_virt_spin_lock_enabled())
356+
return resilient_virt_spin_lock(lock);
357+
355358
RES_INIT_TIMEOUT(ts);
356359

357360
/*

0 commit comments

Comments
 (0)