
Commit 2f2d529

Merge tag 'bitmap-for-6.15' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:

 - cpumask_next_wrap() rework (me)
 - GENMASK() simplification (I Hsin)
 - rust bindings for cpumasks (Viresh and me)
 - scattered cleanups (Andy, Tamir, Vincent, Ignacio and Joel)

* tag 'bitmap-for-6.15' of https://github.com/norov/linux: (22 commits)
  cpumask: align text in comment
  riscv: fix test_and_{set,clear}_bit ordering documentation
  treewide: fix typo 'unsigned __init128' -> 'unsigned __int128'
  MAINTAINERS: add rust bindings entry for bitmap API
  rust: Add cpumask helpers
  uapi: Revert "bitops: avoid integer overflow in GENMASK(_ULL)"
  cpumask: drop cpumask_next_wrap_old()
  PCI: hv: Switch hv_compose_multi_msi_req_get_cpu() to using cpumask_next_wrap()
  scsi: lpfc: rework lpfc_next_{online,present}_cpu()
  scsi: lpfc: switch lpfc_irq_rebalance() to using cpumask_next_wrap()
  s390: switch stop_machine_yield() to using cpumask_next_wrap()
  padata: switch padata_find_next() to using cpumask_next_wrap()
  cpumask: use cpumask_next_wrap() where appropriate
  cpumask: re-introduce cpumask_next{,_and}_wrap()
  cpumask: deprecate cpumask_next_wrap()
  powerpc/xmon: simplify xmon_batch_next_cpu()
  ibmvnic: simplify ibmvnic_set_queue_affinity()
  virtio_net: simplify virtnet_set_affinity()
  objpool: rework objpool_pop()
  cpumask: add for_each_{possible,online}_cpu_wrap
  ...
2 parents f81c2b8 + 1cf8e15 commit 2f2d529

File tree

23 files changed (+147, -144 lines)


MAINTAINERS

Lines changed: 5 additions & 0 deletions

@@ -4026,6 +4026,11 @@ F: tools/include/vdso/bits.h
 F: tools/lib/bitmap.c
 F: tools/lib/find_bit.c
 
+BITMAP API BINDINGS [RUST]
+M: Yury Norov <yury.norov@gmail.com>
+S: Maintained
+F: rust/helpers/cpumask.c
+
 BITOPS API
 M: Yury Norov <yury.norov@gmail.com>
 R: Rasmus Villemoes <linux@rasmusvillemoes.dk>

arch/powerpc/xmon/xmon.c

Lines changed: 1 addition & 5 deletions

@@ -1271,11 +1271,7 @@ static int xmon_batch_next_cpu(void)
 {
     unsigned long cpu;
 
-    while (!cpumask_empty(&xmon_batch_cpus)) {
-        cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
-                                xmon_batch_start_cpu, true);
-        if (cpu >= nr_cpu_ids)
-            break;
+    for_each_cpu_wrap(cpu, &xmon_batch_cpus, xmon_batch_start_cpu) {
         if (xmon_batch_start_cpu == -1)
             xmon_batch_start_cpu = cpu;
         if (xmon_switch_cpu(cpu))
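
The open-coded wrap-around search above collapses into a single for_each_cpu_wrap() loop. A minimal sketch of that iterator follows; the mask, start value and pr_info() reporting are hypothetical, not taken from the xmon code:

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Visit every CPU set in @mask, beginning at @start and wrapping around
 * so that CPUs numbered below @start are covered too.
 */
static void visit_cpus_from(const struct cpumask *mask, unsigned int start)
{
    unsigned int cpu;

    for_each_cpu_wrap(cpu, mask, start)
        pr_info("visiting CPU %u\n", cpu);
}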

arch/riscv/include/asm/bitops.h

Lines changed: 2 additions & 2 deletions

@@ -226,7 +226,7 @@ static __always_inline int variable_fls(unsigned int x)
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation may be reordered on other architectures than x86.
+ * This is an atomic fully-ordered operation (implied full memory barrier).
  */
 static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
@@ -238,7 +238,7 @@ static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long
  * @nr: Bit to clear
  * @addr: Address to count from
  *
- * This operation can be reordered on other architectures other than x86.
+ * This is an atomic fully-ordered operation (implied full memory barrier).
  */
 static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
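
The corrected kerneldoc promises full ordering, which callers of test_and_set_bit() may rely on. A minimal sketch of such a caller, with a hypothetical "claimed" flag that is not part of the riscv patch:

#include <linux/types.h>
#include <linux/bitops.h>

static unsigned long claimed;   /* bit 0: resource already claimed */

static bool try_claim(void)
{
    /* Atomic and fully ordered (implied full memory barrier): the caller
     * that observes the bit as previously clear wins the claim, and its
     * later accesses are ordered against this read-modify-write.
     */
    return !test_and_set_bit(0, &claimed);
}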

arch/s390/kernel/processor.c

Lines changed: 1 addition & 1 deletion

@@ -72,7 +72,7 @@ void notrace stop_machine_yield(const struct cpumask *cpumask)
     this_cpu = smp_processor_id();
     if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
         __this_cpu_write(cpu_relax_retry, 0);
-        cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
+        cpu = cpumask_next_wrap(this_cpu, cpumask);
         if (cpu >= nr_cpu_ids)
             return;
         if (arch_vcpu_is_preempted(cpu))
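
This is the pattern repeated in the pci-hyperv, lpfc and padata conversions: the reworked cpumask_next_wrap() takes only the previous CPU and the mask, dropping the explicit start and wrap arguments. A before/after sketch with a hypothetical round-robin picker (the helper name is illustrative; the nr_cpu_ids check mirrors the call sites in this series):

#include <linux/cpumask.h>

static unsigned int pick_next_cpu(unsigned int prev)
{
    unsigned int cpu;

    /* Old form, removed by this series:
     *   cpu = cpumask_next_wrap(prev, cpu_online_mask, nr_cpu_ids, false);
     */
    cpu = cpumask_next_wrap(prev, cpu_online_mask);
    if (cpu >= nr_cpu_ids)  /* nothing found in the mask */
        cpu = prev;

    return cpu;
}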

drivers/net/ethernet/ibm/ibmvnic.c

Lines changed: 11 additions & 7 deletions

@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
             (*stragglers)--;
     }
     /* atomic write is safer than writing bit by bit directly */
-    for (i = 0; i < stride; i++) {
-        cpumask_set_cpu(*cpu, mask);
-        *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
-                                 nr_cpu_ids, false);
+    for_each_online_cpu_wrap(i, *cpu) {
+        if (!stride--) {
+            /* For the next queue we start from the first
+             * unused CPU in this queue
+             */
+            *cpu = i;
+            break;
+        }
+        cpumask_set_cpu(i, mask);
     }
+
     /* set queue affinity mask */
     cpumask_copy(queue->affinity_mask, mask);
     rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
     int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
     int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
     int total_queues, stride, stragglers, i;
-    unsigned int num_cpu, cpu;
+    unsigned int num_cpu, cpu = 0;
     bool is_rx_queue;
     int rc = 0;
 
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
     stride = max_t(int, num_cpu / total_queues, 1);
     /* number of leftover cpu's */
     stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
-    /* next available cpu to assign irq to */
-    cpu = cpumask_next(-1, cpu_online_mask);
 
     for (i = 0; i < total_queues; i++) {
         is_rx_queue = false;
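
Both this hunk and the virtio_net hunk below adopt the new for_each_online_cpu_wrap() iterator to hand out groups of online CPUs round-robin. A condensed sketch of that pattern; assign_cpus(), @count and the return convention are hypothetical, added only to show the shape of the loop:

#include <linux/cpumask.h>

/* Set @count online CPUs in @mask, starting at @start and wrapping.
 * Returns the CPU the next group should start from.
 */
static unsigned int assign_cpus(struct cpumask *mask, unsigned int start,
                                unsigned int count)
{
    unsigned int cpu;

    for_each_online_cpu_wrap(cpu, start) {
        if (!count--) {
            start = cpu;    /* first CPU left unused by this group */
            break;
        }
        cpumask_set_cpu(cpu, mask);
    }

    return start;
}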

drivers/net/virtio_net.c

Lines changed: 7 additions & 5 deletions

@@ -3826,7 +3826,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
     cpumask_var_t mask;
     int stragglers;
     int group_size;
-    int i, j, cpu;
+    int i, start = 0, cpu;
     int num_cpu;
     int stride;
 
@@ -3840,16 +3840,18 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
     stragglers = num_cpu >= vi->curr_queue_pairs ?
             num_cpu % vi->curr_queue_pairs :
             0;
-    cpu = cpumask_first(cpu_online_mask);
 
     for (i = 0; i < vi->curr_queue_pairs; i++) {
         group_size = stride + (i < stragglers ? 1 : 0);
 
-        for (j = 0; j < group_size; j++) {
+        for_each_online_cpu_wrap(cpu, start) {
+            if (!group_size--) {
+                start = cpu;
+                break;
+            }
             cpumask_set_cpu(cpu, mask);
-            cpu = cpumask_next_wrap(cpu, cpu_online_mask,
-                                    nr_cpu_ids, false);
         }
+
         virtqueue_set_affinity(vi->rq[i].vq, mask);
         virtqueue_set_affinity(vi->sq[i].vq, mask);
         __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);

drivers/pci/controller/pci-hyperv.c

Lines changed: 1 addition & 2 deletions

@@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
 
     spin_lock_irqsave(&multi_msi_cpu_lock, flags);
 
-    cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
-                                 false);
+    cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
     cpu = cpu_next;
 
     spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);

drivers/scsi/lpfc/lpfc.h

Lines changed: 5 additions & 18 deletions

@@ -1715,35 +1715,22 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
  * Note: If no valid cpu found, then nr_cpu_ids is returned.
  *
  **/
-static inline unsigned int
+static __always_inline unsigned int
 lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
 {
-    unsigned int cpu_it;
-
-    for_each_cpu_wrap(cpu_it, mask, start) {
-        if (cpu_online(cpu_it))
-            break;
-    }
-
-    return cpu_it;
+    return cpumask_next_and_wrap(start, mask, cpu_online_mask);
 }
+
 /**
  * lpfc_next_present_cpu - Finds next present CPU after n
  * @n: the cpu prior to search
  *
  * Note: If no next present cpu, then fallback to first present cpu.
  *
  **/
-static inline unsigned int lpfc_next_present_cpu(int n)
+static __always_inline unsigned int lpfc_next_present_cpu(int n)
 {
-    unsigned int cpu;
-
-    cpu = cpumask_next(n, cpu_present_mask);
-
-    if (cpu >= nr_cpu_ids)
-        cpu = cpumask_first(cpu_present_mask);
-
-    return cpu;
+    return cpumask_next_wrap(n, cpu_present_mask);
 }
 
 /**
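
lpfc_next_online_cpu() now simply forwards to cpumask_next_and_wrap(), the re-introduced helper that walks the intersection of two masks with wrap-around. The same idea restated outside lpfc, as a sketch; the driver mask and helper name are hypothetical:

#include <linux/cpumask.h>

/* Next CPU that is both in @prefer and online, searching after @n and
 * wrapping around; nr_cpu_ids means no such CPU exists.
 */
static unsigned int next_preferred_online_cpu(int n, const struct cpumask *prefer)
{
    return cpumask_next_and_wrap(n, prefer, cpu_online_mask);
}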

drivers/scsi/lpfc/lpfc_init.c

Lines changed: 1 addition & 1 deletion

@@ -12873,7 +12873,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
 
     if (offline) {
         /* Find next online CPU on original mask */
-        cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+        cpu_next = cpumask_next_wrap(cpu, orig_mask);
         cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
 
         /* Found a valid CPU */

include/linux/bitmap.h

Lines changed: 5 additions & 3 deletions

@@ -560,9 +560,9 @@ void bitmap_replace(unsigned long *dst,
  *       ...0..11...0..10
  * dst:  0000001100000010
  *
- * A relationship exists between bitmap_scatter() and bitmap_gather().
+ * A relationship exists between bitmap_scatter() and bitmap_gather(). See
+ * bitmap_gather() for the bitmap gather detailed operations. TL;DR:
  * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
- * See bitmap_scatter() for details related to this relationship.
  */
 static __always_inline
 void bitmap_scatter(unsigned long *dst, const unsigned long *src,
@@ -608,7 +608,9 @@ void bitmap_scatter(unsigned long *dst, const unsigned long *src,
  * dst:  0000000000011010
  *
  * A relationship exists between bitmap_gather() and bitmap_scatter(). See
- * bitmap_scatter() for the bitmap scatter detailed operations.
+ * bitmap_scatter() for the bitmap scatter detailed operations. TL;DR:
+ * bitmap_scatter() can be seen as the 'reverse' bitmap_gather() operation.
+ *
 * Suppose scattered computed using bitmap_scatter(scattered, src, mask, n).
 * The operation bitmap_gather(result, scattered, mask, n) leads to a result
 * equal or equivalent to src.
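
The updated comments state that bitmap_gather() reverses bitmap_scatter() for the same mask. A round-trip sketch using the values from the kerneldoc example shown in the diff context (the concrete constants are illustrative):

#include <linux/bitmap.h>

static void scatter_gather_roundtrip(void)
{
    /* Values follow the kerneldoc example: src 0x005a, mask 0x1313. */
    DECLARE_BITMAP(src, 16)       = { 0x005a };
    DECLARE_BITMAP(mask, 16)      = { 0x1313 };
    DECLARE_BITMAP(scattered, 16) = { 0 };
    DECLARE_BITMAP(result, 16)    = { 0 };

    /* Spread the low bits of @src onto the positions set in @mask. */
    bitmap_scatter(scattered, src, mask, 16);   /* 0000001100000010 */

    /* Collect them back; per the comment, "equal or equivalent" to @src. */
    bitmap_gather(result, scattered, mask, 16); /* 0000000000011010 */
}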
