
Commit f9d2ee3

nathanchance authored and YuryNorov committed
riscv: Always inline bitops
When building allmodconfig + ThinLTO with certain versions of clang, arch_set_bit() may not be inlined, resulting in a modpost warning:

  WARNING: modpost: vmlinux: section mismatch in reference: arch_set_bit+0x58 (section: .text.arch_set_bit) -> numa_nodes_parsed (section: .init.data)

acpi_numa_rintc_affinity_init() calls arch_set_bit() via __node_set() with numa_nodes_parsed, which is marked as __initdata. If arch_set_bit() is not inlined, modpost will flag that it is being called with data that will be freed after init.

As acpi_numa_rintc_affinity_init() is marked as __init, there is no actual functional issue here. However, the bitop functions should be marked __always_inline so that they behave consistently for init and non-init code, as the comment in include/linux/nodemask.h alludes to. This matches the s390 and x86 implementations.

Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Yury Norov <yury.norov@gmail.com>
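To see the shape of the problem, the chain the warning describes can be sketched outside the kernel. The names below mirror the identifiers from the commit message (__node_set(), arch_set_bit(), numa_nodes_parsed), but the bodies are simplified stand-ins, not the real kernel implementations. The point is only that if arch_set_bit() were emitted out of line, that standalone copy would carry a reference to the object (which in the kernel is __initdata and freed after boot), which is exactly the pattern modpost flags:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Stand-in for riscv's arch_set_bit(); the real one is an atomic AMO.
 * With __always_inline, no out-of-line copy can exist, so no stray
 * reference to init data can survive in a non-init text section. */
static inline __attribute__((__always_inline__))
void arch_set_bit(int nr, volatile unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

typedef struct { unsigned long bits[1]; } nodemask_t;

/* Stand-in for __node_set() from include/linux/nodemask.h. */
static inline __attribute__((__always_inline__))
void __node_set(int node, volatile nodemask_t *dstp)
{
	arch_set_bit(node, dstp->bits);
}

/* In the kernel this object is marked __initdata and freed after init. */
static nodemask_t numa_nodes_parsed;

int main(void)
{
	/* As acpi_numa_rintc_affinity_init() does via node_set(). */
	__node_set(3, &numa_nodes_parsed);
	printf("%lx\n", numa_nodes_parsed.bits[0]);
	return 0;
}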
1 parent 4463a44 commit f9d2ee3

File tree

1 file changed (+10, -10 lines)


arch/riscv/include/asm/bitops.h

Lines changed: 10 additions & 10 deletions
@@ -228,7 +228,7 @@ static __always_inline int variable_fls(unsigned int x)
  *
  * This operation may be reordered on other architectures than x86.
  */
-static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(or, __NOP, nr, addr);
 }
@@ -240,7 +240,7 @@ static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This operation can be reordered on other architectures other than x86.
  */
-static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(and, __NOT, nr, addr);
 }
@@ -253,7 +253,7 @@ static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(xor, __NOP, nr, addr);
 }
@@ -270,7 +270,7 @@ static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void arch_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(or, __NOP, nr, addr);
 }
@@ -284,7 +284,7 @@ static inline void arch_set_bit(int nr, volatile unsigned long *addr)
  * on non x86 architectures, so if you are writing portable code,
  * make sure not to rely on its reordering guarantees.
  */
-static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(and, __NOT, nr, addr);
 }
@@ -298,7 +298,7 @@ static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
  */
-static inline void arch_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(xor, __NOP, nr, addr);
 }
@@ -311,7 +311,7 @@ static inline void arch_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and provides acquire barrier semantics.
  * It can be used to implement bit locks.
  */
-static inline int arch_test_and_set_bit_lock(
+static __always_inline int arch_test_and_set_bit_lock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
@@ -324,7 +324,7 @@ static inline int arch_test_and_set_bit_lock(
  *
  * This operation is atomic and provides release barrier semantics.
  */
-static inline void arch_clear_bit_unlock(
+static __always_inline void arch_clear_bit_unlock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	__op_bit_ord(and, __NOT, nr, addr, .rl);
@@ -345,13 +345,13 @@ static inline void arch_clear_bit_unlock(
  * non-atomic property here: it's a lot more instructions and we still have to
  * provide release semantics anyway.
  */
-static inline void arch___clear_bit_unlock(
+static __always_inline void arch___clear_bit_unlock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	arch_clear_bit_unlock(nr, addr);
 }

-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
 		volatile unsigned long *addr)
 {
 	unsigned long res;
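The __op_bit()/__test_and_op_bit() helpers in the bodies above wrap RISC-V atomic memory operations. As a rough, RV64-only illustration of what arch_set_bit() reduces to once inlined, here is a standalone sketch of the amoor.d pattern. It approximates the header's __op_bit(or, __NOP, nr, addr) expansion rather than copying it verbatim (the real macros are parameterized over the operation, a __NOP/__NOT modifier, and an .aq/.rl ordering suffix), and the local BIT_MASK/BIT_WORD definitions stand in for the kernel's:

/* Build with a riscv64 compiler; the "+A" constraint (an address held
 * in a general-purpose register) is RISC-V specific. */
#include <stdio.h>

#define BITS_PER_LONG	64
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

static inline __attribute__((__always_inline__))
void set_bit_amo(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__ (
		/* Atomically OR the mask into the word; the old value
		 * is discarded by writing it to the zero register. */
		"amoor.d zero, %1, %0"
		: "+A" (addr[BIT_WORD(nr)])
		: "r" (BIT_MASK(nr))
		: "memory");
}

int main(void)
{
	unsigned long word = 0;

	set_bit_amo(5, &word);
	printf("%#lx\n", word);	/* prints 0x20 */
	return 0;
}

Because the helper is __always_inline, that single amoor.d is emitted directly in each caller, so an __init caller's reference to __initdata lives and dies with the init sections; that is the init/non-init consistency the commit message is after.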
