Skip to content

Commit 6116e22

Browse files
Alexandre Ghiti authored and palmer-dabbelt committed
riscv: Improve zacas fully-ordered cmpxchg()
The current fully-ordered cmpxchgXX() implementation results in: amocas.X.rl a5,a4,(s1) fence rw,rw This provides enough sync but we can actually use the following better mapping instead: amocas.X.aqrl a5,a4,(s1) Suggested-by: Andrea Parri <andrea@rivosinc.com> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com> Reviewed-by: Andrew Jones <ajones@ventanamicro.com> Reviewed-by: Andrea Parri <parri.andrea@gmail.com> Link: https://lore.kernel.org/r/20241103145153.105097-7-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
1 parent 1658ef4 commit 6116e22

File tree

1 file changed

+64
-28
lines changed

1 file changed

+64
-28
lines changed

arch/riscv/include/asm/cmpxchg.h

Lines changed: 64 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -107,8 +107,10 @@
107107
* store NEW in MEM. Return the initial value in MEM. Success is
108108
* indicated by comparing RETURN with OLD.
109109
*/
110-
111-
#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \
110+
#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \
111+
sc_prepend, sc_append, \
112+
cas_prepend, cas_append, \
113+
r, p, o, n) \
112114
({ \
113115
if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \
114116
IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \
@@ -117,9 +119,9 @@
117119
r = o; \
118120
\
119121
__asm__ __volatile__ ( \
120-
prepend \
122+
cas_prepend \
121123
" amocas" cas_sfx " %0, %z2, %1\n" \
122-
append \
124+
cas_append \
123125
: "+&r" (r), "+A" (*(p)) \
124126
: "rJ" (n) \
125127
: "memory"); \
@@ -134,15 +136,15 @@
134136
ulong __rc; \
135137
\
136138
__asm__ __volatile__ ( \
137-
prepend \
139+
sc_prepend \
138140
"0: lr.w %0, %2\n" \
139141
" and %1, %0, %z5\n" \
140142
" bne %1, %z3, 1f\n" \
141143
" and %1, %0, %z6\n" \
142144
" or %1, %1, %z4\n" \
143145
" sc.w" sc_sfx " %1, %1, %2\n" \
144146
" bnez %1, 0b\n" \
145-
append \
147+
sc_append \
146148
"1:\n" \
147149
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
148150
: "rJ" ((long)__oldx), "rJ" (__newx), \
@@ -153,37 +155,42 @@
153155
} \
154156
})
155157

156-
#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \
158+
#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \
159+
sc_prepend, sc_append, \
160+
cas_prepend, cas_append, \
161+
r, p, co, o, n) \
157162
({ \
158163
if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \
159164
riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \
160165
r = o; \
161166
\
162167
__asm__ __volatile__ ( \
163-
prepend \
164-
" amocas" sc_cas_sfx " %0, %z2, %1\n" \
165-
append \
168+
cas_prepend \
169+
" amocas" cas_sfx " %0, %z2, %1\n" \
170+
cas_append \
166171
: "+&r" (r), "+A" (*(p)) \
167172
: "rJ" (n) \
168173
: "memory"); \
169174
} else { \
170175
register unsigned int __rc; \
171176
\
172177
__asm__ __volatile__ ( \
173-
prepend \
178+
sc_prepend \
174179
"0: lr" lr_sfx " %0, %2\n" \
175180
" bne %0, %z3, 1f\n" \
176-
" sc" sc_cas_sfx " %1, %z4, %2\n" \
181+
" sc" sc_sfx " %1, %z4, %2\n" \
177182
" bnez %1, 0b\n" \
178-
append \
183+
sc_append \
179184
"1:\n" \
180185
: "=&r" (r), "=&r" (__rc), "+A" (*(p)) \
181186
: "rJ" (co o), "rJ" (n) \
182187
: "memory"); \
183188
} \
184189
})
185190

186-
#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \
191+
#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \
192+
sc_prepend, sc_append, \
193+
cas_prepend, cas_append) \
187194
({ \
188195
__typeof__(ptr) __ptr = (ptr); \
189196
__typeof__(*(__ptr)) __old = (old); \
@@ -192,40 +199,69 @@
192199
\
193200
switch (sizeof(*__ptr)) { \
194201
case 1: \
195-
__arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \
196-
prepend, append, \
197-
__ret, __ptr, __old, __new); \
202+
__arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \
203+
sc_prepend, sc_append, \
204+
cas_prepend, cas_append, \
205+
__ret, __ptr, __old, __new); \
198206
break; \
199207
case 2: \
200-
__arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \
201-
prepend, append, \
202-
__ret, __ptr, __old, __new); \
208+
__arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \
209+
sc_prepend, sc_append, \
210+
cas_prepend, cas_append, \
211+
__ret, __ptr, __old, __new); \
203212
break; \
204213
case 4: \
205-
__arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \
206-
__ret, __ptr, (long), __old, __new); \
214+
__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
215+
sc_prepend, sc_append, \
216+
cas_prepend, cas_append, \
217+
__ret, __ptr, (long), __old, __new); \
207218
break; \
208219
case 8: \
209-
__arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \
210-
__ret, __ptr, /**/, __old, __new); \
220+
__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
221+
sc_prepend, sc_append, \
222+
cas_prepend, cas_append, \
223+
__ret, __ptr, /**/, __old, __new); \
211224
break; \
212225
default: \
213226
BUILD_BUG(); \
214227
} \
215228
(__typeof__(*(__ptr)))__ret; \
216229
})
217230

231+
/*
232+
* These macros are here to improve the readability of the arch_cmpxchg_XXX()
233+
* macros.
234+
*/
235+
#define SC_SFX(x) x
236+
#define CAS_SFX(x) x
237+
#define SC_PREPEND(x) x
238+
#define SC_APPEND(x) x
239+
#define CAS_PREPEND(x) x
240+
#define CAS_APPEND(x) x
241+
218242
#define arch_cmpxchg_relaxed(ptr, o, n) \
219-
_arch_cmpxchg((ptr), (o), (n), "", "", "")
243+
_arch_cmpxchg((ptr), (o), (n), \
244+
SC_SFX(""), CAS_SFX(""), \
245+
SC_PREPEND(""), SC_APPEND(""), \
246+
CAS_PREPEND(""), CAS_APPEND(""))
220247

221248
#define arch_cmpxchg_acquire(ptr, o, n) \
222-
_arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
249+
_arch_cmpxchg((ptr), (o), (n), \
250+
SC_SFX(""), CAS_SFX(""), \
251+
SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \
252+
CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER))
223253

224254
#define arch_cmpxchg_release(ptr, o, n) \
225-
_arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
255+
_arch_cmpxchg((ptr), (o), (n), \
256+
SC_SFX(""), CAS_SFX(""), \
257+
SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \
258+
CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND(""))
226259

227260
#define arch_cmpxchg(ptr, o, n) \
228-
_arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n")
261+
_arch_cmpxchg((ptr), (o), (n), \
262+
SC_SFX(".rl"), CAS_SFX(".aqrl"), \
263+
SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \
264+
CAS_PREPEND(""), CAS_APPEND(""))
229265

230266
#define arch_cmpxchg_local(ptr, o, n) \
231267
arch_cmpxchg_relaxed((ptr), (o), (n))

0 commit comments

Comments (0)