Skip to content

Commit 11a299a

Browse files
committed
Merge tag 'for-6.12/block-20240925' of git://git.kernel.dk/linux
Pull more block updates from Jens Axboe:

 - Improve blk-integrity segment counting and merging (Keith)

 - NVMe pull request via Keith:
     - Multipath fixes (Hannes)
     - Sysfs attribute list NULL terminate fix (Shin'ichiro)
     - Remove problematic read-back (Keith)

 - Fix for a regression with the IO scheduler switching freezing from
   6.11 (Damien)

 - Use a raw spinlock for sbitmap, as it may get called from preempt
   disabled context (Ming)

 - Cleanup for bd_claiming waiting, using var_waitqueue() rather than
   the bit waitqueues, as that more accurately describes what it does
   (Neil)

 - Various cleanups (Kanchan, Qiu-ji, David)

* tag 'for-6.12/block-20240925' of git://git.kernel.dk/linux:
  nvme: remove CC register read-back during enabling
  nvme: null terminate nvme_tls_attrs
  nvme-multipath: avoid hang on inaccessible namespaces
  nvme-multipath: system fails to create generic nvme device
  lib/sbitmap: define swap_lock as raw_spinlock_t
  block: Remove unused blk_limits_io_{min,opt}
  drbd: Fix atomicity violation in drbd_uuid_set_bm()
  block: Fix elv_iosched_local_module handling of "none" scheduler
  block: remove bogus union
  block: change wait on bd_claiming to use a var_waitqueue
  blk-integrity: improved sg segment mapping
  block: unexport blk_rq_count_integrity_sg
  nvme-rdma: use request to get integrity segments
  scsi: use request to get integrity segments
  block: provide a request helper for user integrity segments
  blk-integrity: consider entire bio list for merging
  blk-integrity: properly account for segments
  blk-mq: set the nr_integrity_segments from bio
  blk-mq: unconditional nr_integrity_segments
2 parents fe29393 + a045553 commit 11a299a

File tree

20 files changed

+76
-100
lines changed

20 files changed

+76
-100
lines changed

block/bdev.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -555,7 +555,7 @@ int bd_prepare_to_claim(struct block_device *bdev, void *holder,
555555

556556
/* if claiming is already in progress, wait for it to finish */
557557
if (whole->bd_claiming) {
558-
wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
558+
wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
559559
DEFINE_WAIT(wait);
560560

561561
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
@@ -578,7 +578,7 @@ static void bd_clear_claiming(struct block_device *whole, void *holder)
578578
/* tell others that we're done */
579579
BUG_ON(whole->bd_claiming != holder);
580580
whole->bd_claiming = NULL;
581-
wake_up_bit(&whole->bd_claiming, 0);
581+
wake_up_var(&whole->bd_claiming);
582582
}
583583

584584
/**

block/bio-integrity.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -367,7 +367,6 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
367367
kfree(bvec);
368368
return ret;
369369
}
370-
EXPORT_SYMBOL_GPL(bio_integrity_map_user);
371370

372371
/**
373372
* bio_integrity_prep - Prepare bio for integrity I/O

block/blk-integrity.c

Lines changed: 25 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,6 @@ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
5353

5454
return segments;
5555
}
56-
EXPORT_SYMBOL(blk_rq_count_integrity_sg);
5756

5857
/**
5958
* blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
@@ -63,19 +62,20 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
6362
*
6463
* Description: Map the integrity vectors in request into a
6564
* scatterlist. The scatterlist must be big enough to hold all
66-
* elements. I.e. sized using blk_rq_count_integrity_sg().
65+
* elements. I.e. sized using blk_rq_count_integrity_sg() or
66+
* rq->nr_integrity_segments.
6767
*/
68-
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
69-
struct scatterlist *sglist)
68+
int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
7069
{
7170
struct bio_vec iv, ivprv = { NULL };
71+
struct request_queue *q = rq->q;
7272
struct scatterlist *sg = NULL;
73+
struct bio *bio = rq->bio;
7374
unsigned int segments = 0;
7475
struct bvec_iter iter;
7576
int prev = 0;
7677

7778
bio_for_each_integrity_vec(iv, bio, iter) {
78-
7979
if (prev) {
8080
if (!biovec_phys_mergeable(q, &ivprv, &iv))
8181
goto new_segment;
@@ -103,10 +103,30 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
103103
if (sg)
104104
sg_mark_end(sg);
105105

106+
/*
107+
* Something must have been wrong if the figured number of segment
108+
* is bigger than number of req's physical integrity segments
109+
*/
110+
BUG_ON(segments > rq->nr_integrity_segments);
111+
BUG_ON(segments > queue_max_integrity_segments(q));
106112
return segments;
107113
}
108114
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
109115

116+
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
117+
ssize_t bytes, u32 seed)
118+
{
119+
int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed);
120+
121+
if (ret)
122+
return ret;
123+
124+
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
125+
rq->cmd_flags |= REQ_INTEGRITY;
126+
return 0;
127+
}
128+
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
129+
110130
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
111131
struct request *next)
112132
{
@@ -134,7 +154,6 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
134154
struct bio *bio)
135155
{
136156
int nr_integrity_segs;
137-
struct bio *next = bio->bi_next;
138157

139158
if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
140159
return true;
@@ -145,16 +164,11 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
145164
if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
146165
return false;
147166

148-
bio->bi_next = NULL;
149167
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
150-
bio->bi_next = next;
151-
152168
if (req->nr_integrity_segments + nr_integrity_segs >
153169
q->limits.max_integrity_segments)
154170
return false;
155171

156-
req->nr_integrity_segments += nr_integrity_segs;
157-
158172
return true;
159173
}
160174

block/blk-merge.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -639,6 +639,9 @@ static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
639639
* counters.
640640
*/
641641
req->nr_phys_segments += nr_phys_segs;
642+
if (bio_integrity(bio))
643+
req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
644+
bio);
642645
return 1;
643646

644647
no_merge:
@@ -731,6 +734,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
731734

732735
/* Merge is OK... */
733736
req->nr_phys_segments = total_phys_segments;
737+
req->nr_integrity_segments += next->nr_integrity_segments;
734738
return 1;
735739
}
736740

block/blk-mq.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -376,9 +376,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
376376
rq->io_start_time_ns = 0;
377377
rq->stats_sectors = 0;
378378
rq->nr_phys_segments = 0;
379-
#if defined(CONFIG_BLK_DEV_INTEGRITY)
380379
rq->nr_integrity_segments = 0;
381-
#endif
382380
rq->end_io = NULL;
383381
rq->end_io_data = NULL;
384382

@@ -2546,6 +2544,9 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
25462544
rq->__sector = bio->bi_iter.bi_sector;
25472545
rq->write_hint = bio->bi_write_hint;
25482546
blk_rq_bio_prep(rq, bio, nr_segs);
2547+
if (bio_integrity(bio))
2548+
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
2549+
bio);
25492550

25502551
/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
25512552
err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);

block/blk-settings.c

Lines changed: 0 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -437,48 +437,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
437437
}
438438
EXPORT_SYMBOL_GPL(queue_limits_set);
439439

440-
/**
441-
* blk_limits_io_min - set minimum request size for a device
442-
* @limits: the queue limits
443-
* @min: smallest I/O size in bytes
444-
*
445-
* Description:
446-
* Some devices have an internal block size bigger than the reported
447-
* hardware sector size. This function can be used to signal the
448-
* smallest I/O the device can perform without incurring a performance
449-
* penalty.
450-
*/
451-
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
452-
{
453-
limits->io_min = min;
454-
455-
if (limits->io_min < limits->logical_block_size)
456-
limits->io_min = limits->logical_block_size;
457-
458-
if (limits->io_min < limits->physical_block_size)
459-
limits->io_min = limits->physical_block_size;
460-
}
461-
EXPORT_SYMBOL(blk_limits_io_min);
462-
463-
/**
464-
* blk_limits_io_opt - set optimal request size for a device
465-
* @limits: the queue limits
466-
* @opt: smallest I/O size in bytes
467-
*
468-
* Description:
469-
* Storage devices may report an optimal I/O size, which is the
470-
* device's preferred unit for sustained I/O. This is rarely reported
471-
* for disk drives. For RAID arrays it is usually the stripe width or
472-
* the internal track size. A properly aligned multiple of
473-
* optimal_io_size is the preferred request size for workloads where
474-
* sustained throughput is desired.
475-
*/
476-
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
477-
{
478-
limits->io_opt = opt;
479-
}
480-
EXPORT_SYMBOL(blk_limits_io_opt);
481-
482440
static int queue_limit_alignment_offset(const struct queue_limits *lim,
483441
sector_t sector)
484442
{

block/elevator.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -715,7 +715,9 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf,
715715

716716
strscpy(elevator_name, buf, sizeof(elevator_name));
717717

718-
return request_module("%s-iosched", strstrip(elevator_name));
718+
request_module("%s-iosched", strstrip(elevator_name));
719+
720+
return 0;
719721
}
720722

721723
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,

drivers/block/drbd/drbd_main.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3399,10 +3399,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
33993399
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
34003400
{
34013401
unsigned long flags;
3402-
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3402+
spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3403+
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
3404+
spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
34033405
return;
3406+
}
34043407

3405-
spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
34063408
if (val == 0) {
34073409
drbd_uuid_move_history(device);
34083410
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];

drivers/nvme/host/core.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2468,11 +2468,6 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
24682468
if (ret)
24692469
return ret;
24702470

2471-
/* Flush write to device (required if transport is PCI) */
2472-
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
2473-
if (ret)
2474-
return ret;
2475-
24762471
/* CAP value may change after initial CC write */
24772472
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
24782473
if (ret)

drivers/nvme/host/ioctl.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
* Copyright (c) 2011-2014, Intel Corporation.
44
* Copyright (c) 2017-2021 Christoph Hellwig.
55
*/
6-
#include <linux/bio-integrity.h>
76
#include <linux/blk-integrity.h>
87
#include <linux/ptrace.h> /* for force_successful_syscall_return */
98
#include <linux/nvme_ioctl.h>
@@ -153,11 +152,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
153152
bio_set_dev(bio, bdev);
154153

155154
if (has_metadata) {
156-
ret = bio_integrity_map_user(bio, meta_buffer, meta_len,
157-
meta_seed);
155+
ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
156+
meta_seed);
158157
if (ret)
159158
goto out_unmap;
160-
req->cmd_flags |= REQ_INTEGRITY;
161159
}
162160

163161
return ret;

0 commit comments

Comments (0)