7 files changed: +16 −14 lines

arch/riscv/include/asm/atomic.h

 #endif
 
 #include <asm/cmpxchg.h>
-#include <asm/barrier.h>
 
 #define __atomic_acquire_fence()	\
 	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
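Note on the context kept above: RISCV_ACQUIRE_BARRIER expands to an assembly string fragment, not a statement, which is why atomic.h can concatenate it straight into an asm template (RISCV_ACQUIRE_BARRIER ""). A minimal sketch of that pattern, assuming a riscv64 toolchain; load_acquire() is a hypothetical illustration, not code from this patch:

	#define RISCV_FENCE_ASM(p, s)	"\tfence " #p "," #s "\n"
	#define RISCV_ACQUIRE_BARRIER	RISCV_FENCE_ASM(r, rw)

	/* Hypothetical helper: a load followed by "fence r,rw", so no later
	 * load or store can be reordered before the read (acquire semantics). */
	static inline long load_acquire(const long *p)
	{
		long v;

		__asm__ __volatile__ (
			"	ld	%0, %1\n"
			RISCV_ACQUIRE_BARRIER
			: "=r" (v)
			: "m" (*p)
			: "memory");
		return v;
	}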
arch/riscv/include/asm/barrier.h

 #define _ASM_RISCV_BARRIER_H
 
 #ifndef __ASSEMBLY__
+#include <asm/fence.h>
 
 #define nop()		__asm__ __volatile__ ("nop")
 #define __nops(n)	".rept	" #n "\nnop\n.endr\n"
 #define nops(n)		__asm__ __volatile__ (__nops(n))
 
-#define RISCV_FENCE(p, s) \
-	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
 
 /* These barriers need to enforce ordering on both devices or memory. */
 #define __mb()		RISCV_FENCE(iorw, iorw)
arch/riscv/include/asm/cmpxchg.h

 
 #include <linux/bug.h>
 
-#include <asm/barrier.h>
 #include <asm/fence.h>
 
 #define __xchg_relaxed(ptr, new, size)	\
arch/riscv/include/asm/fence.h

 #ifndef _ASM_RISCV_FENCE_H
 #define _ASM_RISCV_FENCE_H
 
+#define RISCV_FENCE_ASM(p, s)	"\tfence " #p "," #s "\n"
+#define RISCV_FENCE(p, s) \
+	({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
+
 #ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER		"\tfence r , rw\n"
-#define RISCV_RELEASE_BARRIER		"\tfence rw, w\n"
-#define RISCV_FULL_BARRIER		"\tfence rw, rw\n"
+#define RISCV_ACQUIRE_BARRIER		RISCV_FENCE_ASM(r, rw)
+#define RISCV_RELEASE_BARRIER		RISCV_FENCE_ASM(rw, w)
+#define RISCV_FULL_BARRIER		RISCV_FENCE_ASM(rw, rw)
 #else
 #define RISCV_ACQUIRE_BARRIER
 #define RISCV_RELEASE_BARRIER
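The split gives each consumer the right shape: RISCV_FENCE_ASM yields a string fragment for splicing into other asm templates, while RISCV_FENCE is a complete statement (a GNU statement expression) that emits a standalone fence. A minimal userspace sketch of both forms, assuming a riscv64 compiler such as riscv64-linux-gnu-gcc:

	#define RISCV_FENCE_ASM(p, s)	"\tfence " #p "," #s "\n"
	#define RISCV_FENCE(p, s) \
		({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })

	int main(void)
	{
		/* Statement form: emits "fence iorw,iorw"; the "memory" clobber
		 * also keeps the compiler from reordering accesses across it. */
		RISCV_FENCE(iorw, iorw);

		/* String form: "fence w,o" spliced directly into a template. */
		__asm__ __volatile__ (RISCV_FENCE_ASM(w, o) : : : "memory");

		return 0;
	}

Because #p and #s stringize their arguments, callers pass bare predecessor/successor sets (r, w, i, o, and combinations); a misspelled set becomes an invalid fence operand and fails at assembly time rather than silently.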
arch/riscv/include/asm/io.h

  * sufficient to ensure this works sanely on controllers that support I/O
  * writes.
  */
-#define __io_pbr()	__asm__ __volatile__ ("fence io,i"  : : : "memory");
-#define __io_par(v)	__asm__ __volatile__ ("fence i,ior" : : : "memory");
-#define __io_pbw()	__asm__ __volatile__ ("fence iow,o" : : : "memory");
-#define __io_paw()	__asm__ __volatile__ ("fence o,io"  : : : "memory");
+#define __io_pbr()	RISCV_FENCE(io, i)
+#define __io_par(v)	RISCV_FENCE(i, ior)
+#define __io_pbw()	RISCV_FENCE(iow, o)
+#define __io_paw()	RISCV_FENCE(o, io)
 
 /*
  * Accesses from a single hart to a single I/O address must be ordered. This
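For readers new to the fence operands: in fence p,s the first operand is the predecessor set and the second the successor set (r/w for memory reads/writes, i/o for device input/output), and the instruction orders every earlier operation in p before every later operation in s. Roughly how the generic port helpers use these hooks, a simplified sketch modeled on include/asm-generic/io.h (not code from this patch):

	static inline u8 inb_sketch(unsigned long addr)
	{
		u8 val;

		__io_pbr();	/* fence io,i: prior device I/O completes
				 * before the port read starts */
		val = __raw_readb(PCI_IOBASE + addr);
		__io_par(val);	/* fence i,ior: the read completes before
				 * any later device I/O or memory read */
		return val;
	}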
arch/riscv/include/asm/mmio.h

 #define _ASM_RISCV_MMIO_H
 
 #include <linux/types.h>
+#include <asm/fence.h>
 #include <asm/mmiowb.h>
 
 /* Generic IO read/write.  These perform native-endian accesses. */
@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * doesn't define any ordering between the memory space and the I/O space.
  */
 #define __io_br()	do {} while (0)
-#define __io_ar(v)	({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
-#define __io_bw()	({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_ar(v)	RISCV_FENCE(i, ir)
+#define __io_bw()	RISCV_FENCE(w, o)
 
 #define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
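The statement-expression wrapper is what lets RISCV_FENCE drop into readb() above, which is itself a statement expression. Expanding readb(c) by hand (illustrative only):

	({
		u8 __v;
		do {} while (0);	/* __io_br(): nothing needed before the read */
		__v = readb_cpu(c);	/* the MMIO load itself */
		({ __asm__ __volatile__ ("\tfence i,ir\n" : : : "memory"); });
					/* __io_ar(): the read completes before
					 * later device input or memory reads */
		__v;			/* value of the whole expression */
	})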
arch/riscv/include/asm/mmiowb.h

  * "o,w" is sufficient to ensure that all writes to the device have completed
  * before the write to the spinlock is allowed to commit.
  */
-#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
+#define mmiowb()	RISCV_FENCE(o, w)
 
 #include <linux/smp.h>
 #include <asm-generic/mmiowb.h>
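A side benefit visible in the removed line: the old macro carried a trailing semicolon, so mmiowb(); expanded to two statements, which breaks brace-less if/else; the statement-expression form of RISCV_FENCE is exactly one statement. An illustration (dirty and cleanup() are placeholders, not kernel code):

	if (dirty)
		mmiowb();	/* old macro: asm statement plus an empty ';'
				 * statement, so the 'else' below fails to parse */
	else
		cleanup();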