Skip to content

Commit 4865a27

Browse files
committed
Merge tag 'bitmap-for-6.10v2' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:

 - topology_span_sane() optimization from Kyle Meyer

 - fns() rework from Kuan-Wei Chiu (used in cpumask_local_spread() and
   other places)

 - headers cleanup from Andy

 - add a MAINTAINERS record for bitops API

* tag 'bitmap-for-6.10v2' of https://github.com/norov/linux:
  usercopy: Don't use "proxy" headers
  bitops: Move aligned_byte_mask() to wordpart.h
  MAINTAINERS: add BITOPS API record
  bitmap: relax find_nth_bit() limitation on return value
  lib: make test_bitops compilable into the kernel image
  bitops: Optimize fns() for improved performance
  lib/test_bitops: Add benchmark test for fns()
  Compiler Attributes: Add __always_used macro
  sched/topology: Optimize topology_span_sane()
  cpumask: Add for_each_cpu_from()
2 parents b6394d6 + 5671dca commit 4865a27

File tree

12 files changed

+88
-27
lines changed

12 files changed

+88
-27
lines changed

MAINTAINERS

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3725,6 +3725,20 @@ F: tools/include/vdso/bits.h
37253725
F: tools/lib/bitmap.c
37263726
F: tools/lib/find_bit.c
37273727

3728+
BITOPS API
3729+
M: Yury Norov <yury.norov@gmail.com>
3730+
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
3731+
S: Maintained
3732+
F: arch/*/include/asm/bitops.h
3733+
F: arch/*/include/asm/bitops_32.h
3734+
F: arch/*/include/asm/bitops_64.h
3735+
F: arch/*/lib/bitops.c
3736+
F: include/asm-generic/bitops
3737+
F: include/asm-generic/bitops.h
3738+
F: include/linux/bitops.h
3739+
F: lib/test_bitops.c
3740+
F: tools/*/bitops*
3741+
37283742
BLINKM RGB LED DRIVER
37293743
M: Jan-Simon Moeller <jansimon.moeller@gmx.de>
37303744
S: Maintained

include/linux/bitops.h

Lines changed: 3 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,6 @@
88

99
#include <uapi/linux/kernel.h>
1010

11-
/* Set bits in the first 'n' bytes when loaded from memory */
12-
#ifdef __LITTLE_ENDIAN
13-
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
14-
#else
15-
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
16-
#endif
17-
1811
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
1912
#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
2013
#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
@@ -257,16 +250,10 @@ static inline unsigned int __ffs64(u64 word)
257250
*/
258251
static inline unsigned int fns(unsigned long word, unsigned int n)
259252
{
260-
unsigned int bit;
261-
262-
while (word) {
263-
bit = __ffs(word);
264-
if (n-- == 0)
265-
return bit;
266-
__clear_bit(bit, &word);
267-
}
253+
while (word && n--)
254+
word &= word - 1;
268255

269-
return BITS_PER_LONG;
256+
return word ? __ffs(word) : BITS_PER_LONG;
270257
}
271258

272259
/**

include/linux/compiler_attributes.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -361,6 +361,19 @@
361361
*/
362362
#define __used __attribute__((__used__))
363363

364+
/*
365+
* The __used attribute guarantees that the attributed variable will be
366+
* always emitted by a compiler. It doesn't prevent the compiler from
367+
* throwing 'unused' warnings when it can't detect how the variable is
368+
* actually used. It's a compiler implementation detail whether to emit
369+
* the warning in that case or not.
370+
*
371+
* The combination of both 'used' and 'unused' attributes ensures that
372+
* the variable will be emitted and will not trigger 'unused' warnings.
373+
* The attribute is applicable for functions, static and global variables.
374+
*/
375+
#define __always_used __used __maybe_unused
376+
364377
/*
365378
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
366379
* clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result

include/linux/cpumask.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -385,6 +385,16 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
385385
#define for_each_cpu_or(cpu, mask1, mask2) \
386386
for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
387387

388+
/**
389+
* for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
390+
* @cpu: the (optionally unsigned) integer iterator
391+
* @mask: the cpumask pointer
392+
*
393+
* After the loop, cpu is >= nr_cpu_ids.
394+
*/
395+
#define for_each_cpu_from(cpu, mask) \
396+
for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
397+
388398
/**
389399
* cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
390400
* @mask: the cpumask to search

include/linux/find.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,7 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
222222
* idx = find_first_bit(addr, size);
223223
*
224224
* Returns the bit number of the N'th set bit.
225-
* If no such, returns @size.
225+
* If no such bit exists, returns a value >= @size.
226226
*/
227227
static inline
228228
unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)

include/linux/wordpart.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,4 +39,11 @@
3939
*/
4040
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
4141

42+
/* Set bits in the first 'n' bytes when loaded from memory */
43+
#ifdef __LITTLE_ENDIAN
44+
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
45+
#else
46+
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
47+
#endif
48+
4249
#endif // _LINUX_WORDPART_H

kernel/sched/topology.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2353,7 +2353,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
23532353
static bool topology_span_sane(struct sched_domain_topology_level *tl,
23542354
const struct cpumask *cpu_map, int cpu)
23552355
{
2356-
int i;
2356+
int i = cpu + 1;
23572357

23582358
/* NUMA levels are allowed to overlap */
23592359
if (tl->flags & SDTL_OVERLAP)
@@ -2365,9 +2365,7 @@ static bool topology_span_sane(struct sched_domain_topology_level *tl,
23652365
* breaking the sched_group lists - i.e. a later get_group() pass
23662366
* breaks the linking done for an earlier span.
23672367
*/
2368-
for_each_cpu(i, cpu_map) {
2369-
if (i == cpu)
2370-
continue;
2368+
for_each_cpu_from(i, cpu_map) {
23712369
/*
23722370
* We should 'and' all those masks with 'cpu_map' to exactly
23732371
* match the topology we're about to build, but that can only

lib/Kconfig.debug

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2482,7 +2482,6 @@ config TEST_LKM
24822482

24832483
config TEST_BITOPS
24842484
tristate "Test module for compilation of bitops operations"
2485-
depends on m
24862485
help
24872486
This builds the "test_bitops" module that is much like the
24882487
TEST_LKM module except that it does a basic exercise of the

lib/find_bit.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ out: \
8787
if (sz % BITS_PER_LONG) \
8888
tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz); \
8989
found: \
90-
sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz); \
90+
sz = idx * BITS_PER_LONG + fns(tmp, nr); \
9191
out: \
9292
sz; \
9393
})

lib/test_bitmap.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,7 @@ static void __init test_find_nth_bit(void)
244244
expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5));
245245
expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6));
246246
expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7));
247-
expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8));
247+
expect_eq_uint(0, !!(find_nth_bit(bmap, 64 * 3, 8) < 64 * 3));
248248

249249
expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0));
250250
expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1));
@@ -254,7 +254,7 @@ static void __init test_find_nth_bit(void)
254254
expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5));
255255
expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6));
256256
expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7));
257-
expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8));
257+
expect_eq_uint(0, !!(find_nth_bit(bmap, 64 * 3 - 1, 8) < 64 * 3 - 1));
258258

259259
for_each_set_bit(bit, exp1, EXP1_IN_BITS) {
260260
b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++);

0 commit comments

Comments
 (0)