Commit ab83647

Alexandre Ghiti authored and palmer-dabbelt committed
riscv: Add qspinlock support
In order to produce a generic kernel, a user can select CONFIG_RISCV_COMBO_SPINLOCKS, which will fall back at runtime to the ticket spinlock implementation if Zabha or Ziccrse are not present.

Note that we can't use alternatives here because the discovery of extensions happens too late; we also need to start with the qspinlock implementation because the ticket spinlock implementation would pollute the spinlock value. So let's use static keys instead.

This is largely based on Guo's work and Leonardo's reviews at [1].

Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1]
Signed-off-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Andrea Parri <parri.andrea@gmail.com>
Link: https://lore.kernel.org/r/20241103145153.105097-14-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
1 parent 447b2af commit ab83647

File tree: 7 files changed (+126, -2 lines)

Documentation/features/locking/queued-spinlocks/arch-support.txt

Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@
 | openrisc: | ok |
 | parisc: | TODO |
 | powerpc: | ok |
-| riscv: | TODO |
+| riscv: | ok |
 | s390: | TODO |
 | sh: | TODO |
 | sparc: | ok |

arch/riscv/Kconfig

Lines changed: 34 additions & 0 deletions

@@ -82,6 +82,7 @@ config RISCV
 	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
 	select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
 	select BUILDTIME_TABLE_SORT if MMU
 	select CLINT_TIMER if RISCV_M_MODE
@@ -507,6 +508,39 @@ config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system. Increases memory reserved to accommodate various tables.
 
+choice
+	prompt "RISC-V spinlock type"
+	default RISCV_COMBO_SPINLOCKS
+
+config RISCV_TICKET_SPINLOCKS
+	bool "Using ticket spinlock"
+
+config RISCV_QUEUED_SPINLOCKS
+	bool "Using queued spinlock"
+	depends on SMP && MMU && NONPORTABLE
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  The queued spinlock implementation requires the forward progress
+	  guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
+	  LR/SC with Ziccrse provide such guarantee.
+
+	  Select this if and only if Zabha or Ziccrse is available on your
+	  platform, RISCV_QUEUED_SPINLOCKS must not be selected for platforms
+	  without one of those extensions.
+
+	  If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks
+	  when supported and otherwise ticket spinlocks.
+
+config RISCV_COMBO_SPINLOCKS
+	bool "Using combo spinlock"
+	depends on SMP && MMU
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  Embed both queued spinlock and ticket lock so that the spinlock
+	  implementation can be chosen at runtime.
+
+endchoice
+
 config RISCV_ALTERNATIVE
 	bool
 	depends on !XIP_KERNEL
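
For a generic (distribution-style) kernel, RISCV_COMBO_SPINLOCKS is the default of the new choice. As a rough illustration only (not part of the patch), the resulting .config fragment would be expected to look like this:

# sketch: default RISC-V spinlock selection with this patch applied
CONFIG_RISCV_COMBO_SPINLOCKS=y
# CONFIG_RISCV_TICKET_SPINLOCKS is not set
# CONFIG_RISCV_QUEUED_SPINLOCKS is not set
CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y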

arch/riscv/include/asm/Kbuild

Lines changed: 3 additions & 1 deletion

@@ -6,10 +6,12 @@ generic-y += early_ioremap.h
 generic-y += flat.h
 generic-y += kvm_para.h
 generic-y += mmzone.h
+generic-y += mcs_spinlock.h
 generic-y += parport.h
-generic-y += spinlock.h
 generic-y += spinlock_types.h
+generic-y += ticket_spinlock.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
+generic-y += qspinlock.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
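
Background note (not part of the diff): a generic-y entry makes Kbuild generate a trivial wrapper header so that <asm/qspinlock.h>, <asm/ticket_spinlock.h> and <asm/mcs_spinlock.h> resolve to the asm-generic versions touched below. The generated wrapper is expected to be a single include along these lines (path and comment are illustrative):

/* arch/riscv/include/generated/asm/qspinlock.h -- generated by Kbuild (sketch) */
#include <asm-generic/qspinlock.h>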

arch/riscv/include/asm/spinlock.h

Lines changed: 47 additions & 0 deletions

@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_RISCV_SPINLOCK_H
+#define __ASM_RISCV_SPINLOCK_H
+
+#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
+#define _Q_PENDING_LOOPS	(1 << 9)
+
+#define __no_arch_spinlock_redefine
+#include <asm/ticket_spinlock.h>
+#include <asm/qspinlock.h>
+#include <asm/jump_label.h>
+
+/*
+ * TODO: Use an alternative instead of a static key when we are able to parse
+ * the extensions string earlier in the boot process.
+ */
+DECLARE_STATIC_KEY_TRUE(qspinlock_key);
+
+#define SPINLOCK_BASE_DECLARE(op, type, type_lock)			\
+static __always_inline type arch_spin_##op(type_lock lock)		\
+{									\
+	if (static_branch_unlikely(&qspinlock_key))			\
+		return queued_spin_##op(lock);				\
+	return ticket_spin_##op(lock);					\
+}
+
+SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
+
+#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS)
+
+#include <asm/qspinlock.h>
+
+#else
+
+#include <asm/ticket_spinlock.h>
+
+#endif
+
+#include <asm/qrwlock.h>
+
+#endif /* __ASM_RISCV_SPINLOCK_H */
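
To make the dispatch explicit, here is roughly what one SPINLOCK_BASE_DECLARE expansion produces after preprocessing (a sketch for the lock operation only, not additional code in the patch):

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	/* qspinlock_key defaults to true, so the qspinlock path is the fast path */
	if (static_branch_unlikely(&qspinlock_key))
		return queued_spin_lock(lock);
	/* key disabled at boot (no Zabha/Ziccrse): fall back to the ticket lock */
	return ticket_spin_lock(lock);
}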

arch/riscv/kernel/setup.c

Lines changed: 37 additions & 0 deletions

@@ -244,6 +244,42 @@ static void __init parse_dtb(void)
 #endif
 }
 
+#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
+DEFINE_STATIC_KEY_TRUE(qspinlock_key);
+EXPORT_SYMBOL(qspinlock_key);
+#endif
+
+static void __init riscv_spinlock_init(void)
+{
+	char *using_ext = NULL;
+
+	if (IS_ENABLED(CONFIG_RISCV_TICKET_SPINLOCKS)) {
+		pr_info("Ticket spinlock: enabled\n");
+		return;
+	}
+
+	if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) &&
+	    IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
+	    riscv_isa_extension_available(NULL, ZABHA) &&
+	    riscv_isa_extension_available(NULL, ZACAS)) {
+		using_ext = "using Zabha";
+	} else if (riscv_isa_extension_available(NULL, ZICCRSE)) {
+		using_ext = "using Ziccrse";
+	}
+#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
+	else {
+		static_branch_disable(&qspinlock_key);
+		pr_info("Ticket spinlock: enabled\n");
+		return;
+	}
+#endif
+
+	if (!using_ext)
+		pr_err("Queued spinlock without Zabha or Ziccrse");
+	else
+		pr_info("Queued spinlock %s: enabled\n", using_ext);
+}
+
 extern void __init init_rt_signal_env(void);
 
 void __init setup_arch(char **cmdline_p)
@@ -297,6 +333,7 @@ void __init setup_arch(char **cmdline_p)
 	riscv_set_dma_cache_alignment();
 
 	riscv_user_isa_enable();
+	riscv_spinlock_init();
 }
 
 bool arch_cpu_is_hotpluggable(int cpu)
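
Based on the pr_info()/pr_err() calls above, the boot log should contain one of the following lines, depending on the Kconfig selection and the extensions reported for the platform (listed here for reference, not as captured output):

Queued spinlock using Zabha: enabled
Queued spinlock using Ziccrse: enabled
Ticket spinlock: enabled
Queued spinlock without Zabha or Ziccrse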

include/asm-generic/qspinlock.h

Lines changed: 2 additions & 0 deletions

@@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 }
 #endif
 
+#ifndef __no_arch_spinlock_redefine
 /*
  * Remapping spinlock architecture specific functions to the corresponding
  * queued spinlock functions.
@@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 #define arch_spin_lock(l)		queued_spin_lock(l)
 #define arch_spin_trylock(l)		queued_spin_trylock(l)
 #define arch_spin_unlock(l)		queued_spin_unlock(l)
+#endif
 
 #endif /* __ASM_GENERIC_QSPINLOCK_H */

include/asm-generic/ticket_spinlock.h

Lines changed: 2 additions & 0 deletions

@@ -89,6 +89,7 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
 	return (s16)((val >> 16) - (val & 0xffff)) > 1;
 }
 
+#ifndef __no_arch_spinlock_redefine
 /*
  * Remapping spinlock architecture specific functions to the corresponding
  * ticket spinlock functions.
@@ -99,5 +100,6 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
 #define arch_spin_lock(l)		ticket_spin_lock(l)
 #define arch_spin_trylock(l)		ticket_spin_trylock(l)
 #define arch_spin_unlock(l)		ticket_spin_unlock(l)
+#endif
 
 #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
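
Both guards exist for the same reason: the combo configuration includes both asm-generic headers in one translation unit, and without the guard the second header would redefine the arch_spin_*() macros installed by the first. A minimal sketch of the include pattern the guard enables, which is essentially what the new arch/riscv/include/asm/spinlock.h above does:

/* sketch: pull in both lock flavours without their arch_spin_*() macro remapping */
#define __no_arch_spinlock_redefine
#include <asm/ticket_spinlock.h>	/* provides ticket_spin_lock() and friends */
#include <asm/qspinlock.h>		/* provides queued_spin_lock() and friends */
/* the architecture then defines arch_spin_lock() etc. itself and picks one at runtime */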
