
Commit cd6c916

Merge patch series "riscv/barrier: tidying up barrier-related macro"
Eric Chan <ericchancf@google.com> says:

This series makes the barrier-related macros neater and clearer. It is a follow-up to [0]-[3], split into multiple patches for readability and posted as a new message thread.

[0] (v1/v2) https://lore.kernel.org/lkml/20240209125048.4078639-1-ericchancf@google.com/
[1] (v3) https://lore.kernel.org/lkml/20240213142856.2416073-1-ericchancf@google.com/
[2] (v4) https://lore.kernel.org/lkml/20240213200923.2547570-1-ericchancf@google.com/
[3] (v5) https://lore.kernel.org/lkml/20240213223810.2595804-1-ericchancf@google.com/

* b4-shazam-merge:
  riscv/barrier: Add missing space after ','
  riscv/barrier: Consolidate fence definitions
  riscv/barrier: Define RISCV_FULL_BARRIER
  riscv/barrier: Define __{mb,rmb,wmb}

Link: https://lore.kernel.org/r/20240217131206.3667544-1-ericchancf@google.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2 parents: c70dfa4 + 9133e6e

7 files changed: 36 additions & 32 deletions

arch/riscv/include/asm/atomic.h

Lines changed: 8 additions & 9 deletions
@@ -17,7 +17,6 @@
 #endif
 
 #include <asm/cmpxchg.h>
-#include <asm/barrier.h>
 
 #define __atomic_acquire_fence() \
 	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
@@ -207,7 +206,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int
 		"	add %[rc], %[p], %[a]\n"
 		"	sc.w.rl %[rc], %[rc], %[c]\n"
 		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
+		RISCV_FULL_BARRIER
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		: [a]"r" (a), [u]"r" (u)
@@ -228,7 +227,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
 		"	add %[rc], %[p], %[a]\n"
 		"	sc.d.rl %[rc], %[rc], %[c]\n"
 		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
+		RISCV_FULL_BARRIER
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		: [a]"r" (a), [u]"r" (u)
@@ -248,7 +247,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
 		"	addi %[rc], %[p], 1\n"
 		"	sc.w.rl %[rc], %[rc], %[c]\n"
 		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
+		RISCV_FULL_BARRIER
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		:
@@ -268,7 +267,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
 		"	addi %[rc], %[p], -1\n"
 		"	sc.w.rl %[rc], %[rc], %[c]\n"
 		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
+		RISCV_FULL_BARRIER
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		:
@@ -288,7 +287,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 		"	bltz %[rc], 1f\n"
 		"	sc.w.rl %[rc], %[rc], %[c]\n"
 		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
+		RISCV_FULL_BARRIER
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		:
@@ -310,7 +309,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
 		"	addi %[rc], %[p], 1\n"
 		"	sc.d.rl %[rc], %[rc], %[c]\n"
 		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
+		RISCV_FULL_BARRIER
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		:
@@ -331,7 +330,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
 		"	addi %[rc], %[p], -1\n"
 		"	sc.d.rl %[rc], %[rc], %[c]\n"
 		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
+		RISCV_FULL_BARRIER
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		:
@@ -352,7 +351,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 		"	bltz %[rc], 1f\n"
 		"	sc.d.rl %[rc], %[rc], %[c]\n"
 		"	bnez %[rc], 0b\n"
-		"	fence rw, rw\n"
+		RISCV_FULL_BARRIER
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		:
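
Why this substitution is a drop-in change: adjacent string literals in C concatenate, so a macro that expands to a string literal can replace the literal fence line inside the asm template unchanged, and on !CONFIG_SMP it expands to nothing, dropping the fence entirely. A minimal userspace sketch (not kernel code; macro shapes copied from the fence.h hunk below, assuming CONFIG_SMP):

#include <stdio.h>

#define RISCV_FENCE_ASM(p, s)	"\tfence " #p ", " #s "\n"
#define RISCV_FULL_BARRIER	RISCV_FENCE_ASM(rw, rw)

int main(void)
{
	/* Adjacent literals merge into one asm template string. */
	const char *tmpl =
		"	bnez %[rc], 0b\n"
		RISCV_FULL_BARRIER
		"1:\n";
	fputs(tmpl, stdout);	/* prints the three template lines */
	return 0;
}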

arch/riscv/include/asm/barrier.h

Lines changed: 10 additions & 11 deletions
@@ -11,36 +11,35 @@
 #define _ASM_RISCV_BARRIER_H
 
 #ifndef __ASSEMBLY__
+#include <asm/fence.h>
 
 #define nop()		__asm__ __volatile__ ("nop")
 #define __nops(n)	".rept " #n "\nnop\n.endr\n"
 #define nops(n)		__asm__ __volatile__ (__nops(n))
 
-#define RISCV_FENCE(p, s) \
-	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
 
 /* These barriers need to enforce ordering on both devices or memory. */
-#define mb()		RISCV_FENCE(iorw,iorw)
-#define rmb()		RISCV_FENCE(ir,ir)
-#define wmb()		RISCV_FENCE(ow,ow)
+#define __mb()		RISCV_FENCE(iorw, iorw)
+#define __rmb()		RISCV_FENCE(ir, ir)
+#define __wmb()		RISCV_FENCE(ow, ow)
 
 /* These barriers do not need to enforce ordering on devices, just memory. */
-#define __smp_mb()	RISCV_FENCE(rw,rw)
-#define __smp_rmb()	RISCV_FENCE(r,r)
-#define __smp_wmb()	RISCV_FENCE(w,w)
+#define __smp_mb()	RISCV_FENCE(rw, rw)
+#define __smp_rmb()	RISCV_FENCE(r, r)
+#define __smp_wmb()	RISCV_FENCE(w, w)
 
 #define __smp_store_release(p, v)	\
 do {					\
 	compiletime_assert_atomic_type(*p);	\
-	RISCV_FENCE(rw,w);		\
+	RISCV_FENCE(rw, w);		\
 	WRITE_ONCE(*p, v);		\
 } while (0)
 
 #define __smp_load_acquire(p)		\
 ({					\
 	typeof(*p) ___p1 = READ_ONCE(*p);	\
 	compiletime_assert_atomic_type(*p);	\
-	RISCV_FENCE(r,rw);		\
+	RISCV_FENCE(r, rw);		\
 	___p1;				\
 })
 
@@ -69,7 +68,7 @@ do { \
  * instances the scheduler pairs this with an mb(), so nothing is necessary on
  * the new hart.
  */
-#define smp_mb__after_spinlock()	RISCV_FENCE(iorw,iorw)
+#define smp_mb__after_spinlock()	RISCV_FENCE(iorw, iorw)
 
 #include <asm-generic/barrier.h>
 
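
The rename from mb()/rmb()/wmb() to __mb()/__rmb()/__wmb() hands the user-facing names to include/asm-generic/barrier.h, which wraps the double-underscore variants with KCSAN instrumentation. Roughly (a simplified sketch from memory of the generic header; see it for the authoritative form):

#ifdef __mb
#define mb()	do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
#endif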

arch/riscv/include/asm/cmpxchg.h

Lines changed: 2 additions & 3 deletions
@@ -8,7 +8,6 @@
 
 #include <linux/bug.h>
 
-#include <asm/barrier.h>
 #include <asm/fence.h>
 
 #define __xchg_relaxed(ptr, new, size) \
@@ -313,7 +312,7 @@
 	"	bne %0, %z3, 1f\n"	\
 	"	sc.w.rl %1, %z4, %2\n"	\
 	"	bnez %1, 0b\n"		\
-	"	fence rw, rw\n"		\
+	RISCV_FULL_BARRIER		\
 	"1:\n"				\
 	: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
 	: "rJ" ((long)__old), "rJ" (__new)		\
@@ -325,7 +324,7 @@
 	"	bne %0, %z3, 1f\n"	\
 	"	sc.d.rl %1, %z4, %2\n"	\
 	"	bnez %1, 0b\n"		\
-	"	fence rw, rw\n"		\
+	RISCV_FULL_BARRIER		\
 	"1:\n"				\
 	: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
 	: "rJ" (__old), "rJ" (__new)			\

arch/riscv/include/asm/fence.h

Lines changed: 8 additions & 2 deletions
@@ -1,12 +1,18 @@
 #ifndef _ASM_RISCV_FENCE_H
 #define _ASM_RISCV_FENCE_H
 
+#define RISCV_FENCE_ASM(p, s)	"\tfence " #p ", " #s "\n"
+#define RISCV_FENCE(p, s) \
+	({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
+
 #ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER	"\tfence r , rw\n"
-#define RISCV_RELEASE_BARRIER	"\tfence rw, w\n"
+#define RISCV_ACQUIRE_BARRIER	RISCV_FENCE_ASM(r, rw)
+#define RISCV_RELEASE_BARRIER	RISCV_FENCE_ASM(rw, w)
+#define RISCV_FULL_BARRIER	RISCV_FENCE_ASM(rw, rw)
 #else
 #define RISCV_ACQUIRE_BARRIER
 #define RISCV_RELEASE_BARRIER
+#define RISCV_FULL_BARRIER
 #endif
 
 #endif	/* _ASM_RISCV_FENCE_H */
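
The split into two macros is the core of the consolidation: RISCV_FENCE_ASM yields a bare string fragment for splicing into larger asm templates (as in the atomic.h and cmpxchg.h hunks above), while RISCV_FENCE wraps one fence in a GNU C statement expression so it can also stand where an expression is expected, e.g. inside the ({ ... }) bodies of the MMIO accessors. A sketch of both uses (ours; read_then_fence is illustrative, not a kernel helper; compiles only for RISC-V targets):

#define RISCV_FENCE_ASM(p, s)	"\tfence " #p ", " #s "\n"
#define RISCV_FENCE(p, s) \
	({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })

/* Statement form: emits a single "fence rw, rw". */
static inline void full_fence(void)
{
	RISCV_FENCE(rw, rw);
}

/* Expression position, the way the MMIO helpers use it. */
#define read_then_fence(p)	({ int __v = *(volatile int *)(p); \
				   RISCV_FENCE(i, ir); __v; })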

arch/riscv/include/asm/io.h

Lines changed: 4 additions & 4 deletions
@@ -47,10 +47,10 @@
  * sufficient to ensure this works sanely on controllers that support I/O
  * writes.
  */
-#define __io_pbr()	__asm__ __volatile__ ("fence io,i" : : : "memory");
-#define __io_par(v)	__asm__ __volatile__ ("fence i,ior" : : : "memory");
-#define __io_pbw()	__asm__ __volatile__ ("fence iow,o" : : : "memory");
-#define __io_paw()	__asm__ __volatile__ ("fence o,io" : : : "memory");
+#define __io_pbr()	RISCV_FENCE(io, i)
+#define __io_par(v)	RISCV_FENCE(i, ior)
+#define __io_pbw()	RISCV_FENCE(iow, o)
+#define __io_paw()	RISCV_FENCE(o, io)
 
 /*
  * Accesses from a single hart to a single I/O address must be ordered. This
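
For readers unfamiliar with the fence operands: the first argument is the predecessor set and the second the successor set, drawn from r (memory read), w (memory write), i (device input), and o (device output); the fence orders every earlier operation in the predecessor set before every later operation in the successor set. One way to read the port-I/O barriers above (annotations ours, not from the header):

#define __io_pbr()	RISCV_FENCE(io, i)	/* prior device I/O completes before the port read */
#define __io_par(v)	RISCV_FENCE(i, ior)	/* the port read completes before later I/O and reads */
#define __io_pbw()	RISCV_FENCE(iow, o)	/* prior I/O and memory writes complete before the port write */
#define __io_paw()	RISCV_FENCE(o, io)	/* the port write completes before later device I/O */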

arch/riscv/include/asm/mmio.h

Lines changed: 3 additions & 2 deletions
@@ -12,6 +12,7 @@
 #define _ASM_RISCV_MMIO_H
 
 #include <linux/types.h>
+#include <asm/fence.h>
 #include <asm/mmiowb.h>
 
 /* Generic IO read/write. These perform native-endian accesses. */
@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * doesn't define any ordering between the memory space and the I/O space.
  */
 #define __io_br()	do {} while (0)
-#define __io_ar(v)	({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
-#define __io_bw()	({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_ar(v)	RISCV_FENCE(i, ir)
+#define __io_bw()	RISCV_FENCE(w, o)
 #define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
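
Illustrative expansion of readb() after this change (a sketch: readb_sketch and the volatile load standing in for readb_cpu() are ours, not the kernel's; compiles only for RISC-V):

#include <stdint.h>

#define RISCV_FENCE_ASM(p, s)	"\tfence " #p ", " #s "\n"
#define RISCV_FENCE(p, s) \
	({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })

static inline uint8_t readb_sketch(const volatile uint8_t *addr)
{
	uint8_t v;
	/* __io_br(): do {} while (0), i.e. nothing before the read */
	v = *addr;		/* readb_cpu(): the raw MMIO load */
	RISCV_FENCE(i, ir);	/* __io_ar(): the read is ordered before later reads/inputs */
	return v;
}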

arch/riscv/include/asm/mmiowb.h

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
  * "o,w" is sufficient to ensure that all writes to the device have completed
  * before the write to the spinlock is allowed to commit.
  */
-#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
+#define mmiowb()	RISCV_FENCE(o, w)
 
 #include <linux/smp.h>
 #include <asm-generic/mmiowb.h>
