|
107 | 107 | * store NEW in MEM. Return the initial value in MEM. Success is
|
108 | 108 | * indicated by comparing RETURN with OLD.
|
109 | 109 | */
|
110 |
| - |
111 |
/*
 * NOTE(review): this is a rendered diff ("old | new |" line numbers, -/+
 * markers), not compilable C. The new version of the macro follows.
 *
 * __arch_cmpxchg_masked - cmpxchg for sub-word (byte/halfword) quantities.
 *
 * Fast path: when both Zabha and Zacas are enabled/available, a single
 * "amocas.b"/"amocas.h" (cas_sfx carries the size suffix) does the
 * compare-and-swap directly, bracketed by cas_prepend/cas_append.
 *
 * Fallback: an lr.w/sc.w retry loop on the aligned 32-bit word containing
 * the target, using mask operands (%z5/%z6) to compare and splice only the
 * selected byte/halfword, bracketed by sc_prepend/sc_append. The diff's
 * point: the old single prepend/append pair is split into separate
 * sc_*/cas_* pairs so each path can carry different barriers.
 *
 * NOTE(review): old lines 128-135 (end of the Zabha/Zacas arm and the setup
 * of __ptr32b/__oldx/__newx and the masks used as %z3..%z6) are elided by
 * the diff — consult the full file before relying on those details.
 */
| -#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ |
| 110 | +#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \ |
| 111 | + sc_prepend, sc_append, \ |
| 112 | + cas_prepend, cas_append, \ |
| 113 | + r, p, o, n) \ |
112 | 114 | ({ \

113 | 115 | if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \

114 | 116 | IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \


117 | 119 | r = o; \

118 | 120 | \

119 | 121 | __asm__ __volatile__ ( \

120 |
| - prepend \ |
| 122 | + cas_prepend \ |
121 | 123 | " amocas" cas_sfx " %0, %z2, %1\n" \

122 |
| - append \ |
| 124 | + cas_append \ |
123 | 125 | : "+&r" (r), "+A" (*(p)) \

124 | 126 | : "rJ" (n) \

125 | 127 | : "memory"); \


134 | 136 | ulong __rc; \

135 | 137 | \

136 | 138 | __asm__ __volatile__ ( \

137 |
| - prepend \ |
| 139 | + sc_prepend \ |
138 | 140 | "0: lr.w %0, %2\n" \

139 | 141 | " and %1, %0, %z5\n" \

140 | 142 | " bne %1, %z3, 1f\n" \

141 | 143 | " and %1, %0, %z6\n" \

142 | 144 | " or %1, %1, %z4\n" \

143 | 145 | " sc.w" sc_sfx " %1, %1, %2\n" \

144 | 146 | " bnez %1, 0b\n" \

145 |
| - append \ |
| 147 | + sc_append \ |
146 | 148 | "1:\n" \

147 | 149 | : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \

148 | 150 | : "rJ" ((long)__oldx), "rJ" (__newx), \


153 | 155 | } \

154 | 156 | })
|
155 | 157 |
|
156 |
/*
 * NOTE(review): rendered diff, not compilable C; new version shown by the
 * "+" lines.
 *
 * __arch_cmpxchg - cmpxchg for word/doubleword quantities (lr_sfx selects
 * ".w" or ".d"; callers pass matching sc_sfx/cas_sfx size+ordering suffixes).
 *
 * Fast path: when Zacas is enabled and detected at runtime
 * (riscv_has_extension_unlikely), a single "amocas" performs the CAS,
 * bracketed by cas_prepend/cas_append.
 *
 * Fallback: classic lr/sc retry loop — load-reserve, bail to 1: if the
 * value differs from the expected old value (%z3), otherwise
 * store-conditional the new value and retry on failure — bracketed by
 * sc_prepend/sc_append. "co" is a cast applied to the old value in the
 * input operand ("rJ" (co o)); the size dispatcher passes "(long)" for
 * .w and nothing for .d.
 *
 * The diff renames the old shared prepend/append into per-path
 * sc_*/cas_* barrier arguments and splits sc_cas_sfx into sc_sfx/cas_sfx
 * so the two instruction forms can use different ordering suffixes.
 */
| -#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ |
| 158 | +#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \ |
| 159 | + sc_prepend, sc_append, \ |
| 160 | + cas_prepend, cas_append, \ |
| 161 | + r, p, co, o, n) \ |
157 | 162 | ({ \

158 | 163 | if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \

159 | 164 | riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \

160 | 165 | r = o; \

161 | 166 | \

162 | 167 | __asm__ __volatile__ ( \

163 |
| - prepend \ |
164 |
| - " amocas" sc_cas_sfx " %0, %z2, %1\n" \ |
165 |
| - append \ |
| 168 | + cas_prepend \ |
| 169 | + " amocas" cas_sfx " %0, %z2, %1\n" \ |
| 170 | + cas_append \ |
166 | 171 | : "+&r" (r), "+A" (*(p)) \

167 | 172 | : "rJ" (n) \

168 | 173 | : "memory"); \

169 | 174 | } else { \

170 | 175 | register unsigned int __rc; \

171 | 176 | \

172 | 177 | __asm__ __volatile__ ( \

173 |
| - prepend \ |
| 178 | + sc_prepend \ |
174 | 179 | "0: lr" lr_sfx " %0, %2\n" \

175 | 180 | " bne %0, %z3, 1f\n" \

176 |
| - " sc" sc_cas_sfx " %1, %z4, %2\n" \ |
| 181 | + " sc" sc_sfx " %1, %z4, %2\n" \ |
177 | 182 | " bnez %1, 0b\n" \

178 |
| - append \ |
| 183 | + sc_append \ |
179 | 184 | "1:\n" \

180 | 185 | : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \

181 | 186 | : "rJ" (co o), "rJ" (n) \

182 | 187 | : "memory"); \

183 | 188 | } \

184 | 189 | })
|
185 | 190 |
|
186 |
/*
 * NOTE(review): rendered diff, not compilable C; new version shown by the
 * "+" lines.
 *
 * _arch_cmpxchg - size dispatcher. Snapshots ptr/old (and, per the call
 * sites below, new into __new and the result into __ret — their
 * declarations at old lines 190-191 are elided by this diff; confirm in
 * the full file). Dispatches on sizeof(*ptr):
 *   1, 2  -> __arch_cmpxchg_masked with ".b"/".h" amocas suffixes
 *   4, 8  -> __arch_cmpxchg with ".w"/".d" suffixes; the third-from-last
 *            argument is the cast applied to old ("(long)" sign-extends
 *            the 32-bit old value, nothing for 64-bit)
 *   else  -> BUILD_BUG() (compile-time rejection)
 * The result is cast back to the pointee type. All sc_*/cas_* barrier
 * arguments are forwarded unchanged to the size-specific helpers.
 */
| -#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \ |
| 191 | +#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \ |
| 192 | + sc_prepend, sc_append, \ |
| 193 | + cas_prepend, cas_append) \ |
187 | 194 | ({ \

188 | 195 | __typeof__(ptr) __ptr = (ptr); \

189 | 196 | __typeof__(*(__ptr)) __old = (old); \


192 | 199 | \

193 | 200 | switch (sizeof(*__ptr)) { \

194 | 201 | case 1: \

195 |
| - __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ |
196 |
| - prepend, append, \ |
197 |
| - __ret, __ptr, __old, __new); \ |
| 202 | + __arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \ |
| 203 | + sc_prepend, sc_append, \ |
| 204 | + cas_prepend, cas_append, \ |
| 205 | + __ret, __ptr, __old, __new); \ |
198 | 206 | break; \

199 | 207 | case 2: \

200 |
| - __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ |
201 |
| - prepend, append, \ |
202 |
| - __ret, __ptr, __old, __new); \ |
| 208 | + __arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \ |
| 209 | + sc_prepend, sc_append, \ |
| 210 | + cas_prepend, cas_append, \ |
| 211 | + __ret, __ptr, __old, __new); \ |
203 | 212 | break; \

204 | 213 | case 4: \

205 |
| - __arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \ |
206 |
| - __ret, __ptr, (long), __old, __new); \ |
| 214 | + __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \ |
| 215 | + sc_prepend, sc_append, \ |
| 216 | + cas_prepend, cas_append, \ |
| 217 | + __ret, __ptr, (long), __old, __new); \ |
207 | 218 | break; \

208 | 219 | case 8: \

209 |
| - __arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \ |
210 |
| - __ret, __ptr, /**/, __old, __new); \ |
| 220 | + __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \ |
| 221 | + sc_prepend, sc_append, \ |
| 222 | + cas_prepend, cas_append, \ |
| 223 | + __ret, __ptr, /**/, __old, __new); \ |
211 | 224 | break; \

212 | 225 | default: \

213 | 226 | BUILD_BUG(); \

214 | 227 | } \

215 | 228 | (__typeof__(*(__ptr)))__ret; \

216 | 229 | })
|
217 | 230 |
|
/*
 * NOTE(review): rendered diff — the "+" lines below are new identity
 * macros. Each expands to its argument unchanged; their only purpose is
 * to label the otherwise-anonymous positional arguments at the
 * arch_cmpxchg_XXX() call sites (which suffix/barrier goes to the sc
 * path vs. the cas path), as the in-code comment states.
 */
| 231 | +/* |
| 232 | + * These macros are here to improve the readability of the arch_cmpxchg_XXX() |
| 233 | + * macros. |
| 234 | + */ |
| 235 | +#define SC_SFX(x) x |
| 236 | +#define CAS_SFX(x) x |
| 237 | +#define SC_PREPEND(x) x |
| 238 | +#define SC_APPEND(x) x |
| 239 | +#define CAS_PREPEND(x) x |
| 240 | +#define CAS_APPEND(x) x |
| 241 | + |
/*
 * NOTE(review): rendered diff. Relaxed cmpxchg: no ordering — every
 * suffix and barrier slot is passed empty, for both the lr/sc and the
 * amocas code paths.
 */
218 | 242 | #define arch_cmpxchg_relaxed(ptr, o, n) \

219 |
| - _arch_cmpxchg((ptr), (o), (n), "", "", "") |
| 243 | + _arch_cmpxchg((ptr), (o), (n), \ |
| 244 | + SC_SFX(""), CAS_SFX(""), \ |
| 245 | + SC_PREPEND(""), SC_APPEND(""), \ |
| 246 | + CAS_PREPEND(""), CAS_APPEND("")) |
220 | 247 |
|
/*
 * NOTE(review): rendered diff. Acquire cmpxchg: no instruction suffixes;
 * RISCV_ACQUIRE_BARRIER is appended after both the lr/sc loop and the
 * amocas instruction (acquire ordering = fence after the access).
 */
221 | 248 | #define arch_cmpxchg_acquire(ptr, o, n) \

222 |
| - _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) |
| 249 | + _arch_cmpxchg((ptr), (o), (n), \ |
| 250 | + SC_SFX(""), CAS_SFX(""), \ |
| 251 | + SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \ |
| 252 | + CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER)) |
223 | 253 |
|
/*
 * NOTE(review): rendered diff. Release cmpxchg: mirror image of acquire —
 * RISCV_RELEASE_BARRIER is prepended before both the lr/sc loop and the
 * amocas instruction (release ordering = fence before the access).
 */
224 | 254 | #define arch_cmpxchg_release(ptr, o, n) \

225 |
| - _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") |
| 255 | + _arch_cmpxchg((ptr), (o), (n), \ |
| 256 | + SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \ |
| 257 | + SC_SFX(""), CAS_SFX(""), \ |
| 258 | + CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND("")) |
226 | 259 |
|
/*
 * NOTE(review): rendered diff. Fully-ordered cmpxchg — the two paths now
 * differ, which is the motivation for the split arguments: the lr/sc path
 * uses an sc.rl suffix plus a full barrier appended (RISCV_FULL_BARRIER
 * replaces the old literal " fence rw, rw\n"), while the amocas path
 * gets ordering purely from the ".aqrl" suffix with no explicit fences.
 */
227 | 260 | #define arch_cmpxchg(ptr, o, n) \

228 |
| - _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") |
| 261 | + _arch_cmpxchg((ptr), (o), (n), \ |
| 262 | + SC_SFX(".rl"), CAS_SFX(".aqrl"), \ |
| 263 | + SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \ |
| 264 | + CAS_PREPEND(""), CAS_APPEND("")) |
229 | 265 |
|
/*
 * NOTE(review): unchanged by this diff (no +/- markers). CPU-local
 * cmpxchg needs no cross-CPU ordering, so it simply aliases the relaxed
 * variant.
 */
230 | 266 | #define arch_cmpxchg_local(ptr, o, n) \

231 | 267 | arch_cmpxchg_relaxed((ptr), (o), (n))
|
|
0 commit comments