Skip to content

Commit be0e822

Browse files
Christoph Hellwig authored and axboe committed
block: fix queue limits checks in blk_rq_map_user_bvec for real
blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits, and the last fix to it enabled valid NVMe I/O to pass, but also allowed invalid ones for drivers that set a max_segment_size or seg_boundary limit. Fix it once and for all by using the bio_split_rw_at helper from the I/O path that indicates if and where a bio would have to be split to adhere to the queue limits, and if it returns a positive value, turn that into -EREMOTEIO to retry using the copy path. Fixes: 2ff9494 ("block: fix sanity checks in blk_rq_map_user_bvec") Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: John Garry <john.g.garry@oracle.com> Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 2ff9494 commit be0e822

File tree

1 file changed

+17
-39
lines changed

1 file changed

+17
-39
lines changed

block/blk-map.c

Lines changed: 17 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
561561
/* Prepare bio for passthrough IO given ITER_BVEC iter */
562562
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
563563
{
564-
struct request_queue *q = rq->q;
565-
size_t nr_iter = iov_iter_count(iter);
566-
size_t nr_segs = iter->nr_segs;
567-
struct bio_vec *bvecs, *bvprvp = NULL;
568-
const struct queue_limits *lim = &q->limits;
569-
unsigned int nsegs = 0, bytes = 0;
564+
const struct queue_limits *lim = &rq->q->limits;
565+
unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
566+
unsigned int nsegs;
570567
struct bio *bio;
571-
size_t i;
568+
int ret;
572569

573-
if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
574-
return -EINVAL;
575-
if (nr_segs > queue_max_segments(q))
570+
if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
576571
return -EINVAL;
577572

578-
/* no iovecs to alloc, as we already have a BVEC iterator */
573+
/* reuse the bvecs from the iterator instead of allocating new ones */
579574
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
580-
if (bio == NULL)
575+
if (!bio)
581576
return -ENOMEM;
582-
583577
bio_iov_bvec_set(bio, (struct iov_iter *)iter);
584-
blk_rq_bio_prep(rq, bio, nr_segs);
585-
586-
/* loop to perform a bunch of sanity checks */
587-
bvecs = (struct bio_vec *)iter->bvec;
588-
for (i = 0; i < nr_segs; i++) {
589-
struct bio_vec *bv = &bvecs[i];
590-
591-
/*
592-
* If the queue doesn't support SG gaps and adding this
593-
* offset would create a gap, fallback to copy.
594-
*/
595-
if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
596-
blk_mq_map_bio_put(bio);
597-
return -EREMOTEIO;
598-
}
599-
/* check full condition */
600-
if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
601-
goto put_bio;
602-
if (bytes + bv->bv_len > nr_iter)
603-
break;
604578

605-
nsegs++;
606-
bytes += bv->bv_len;
607-
bvprvp = bv;
579+
/* check that the data layout matches the hardware restrictions */
580+
ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
581+
if (ret) {
582+
/* if we would have to split the bio, copy instead */
583+
if (ret > 0)
584+
ret = -EREMOTEIO;
585+
blk_mq_map_bio_put(bio);
586+
return ret;
608587
}
588+
589+
blk_rq_bio_prep(rq, bio, nsegs);
609590
return 0;
610-
put_bio:
611-
blk_mq_map_bio_put(bio);
612-
return -EINVAL;
613591
}
614592

615593
/**

0 commit comments

Comments
 (0)