Skip to content

Commit 38808af

Browse files
calebsander authored and keithbusch committed
nvme/ioctl: move fixed buffer lookup to nvme_uring_cmd_io()
nvme_map_user_request() is called from both nvme_submit_user_cmd() and nvme_uring_cmd_io(). But the ioucmd branch is only applicable to nvme_uring_cmd_io(). Move it to nvme_uring_cmd_io() and just pass the resulting iov_iter to nvme_map_user_request(). For NVMe passthru operations with fixed buffers, the fixed buffer lookup happens in io_uring_cmd_import_fixed(). But nvme_uring_cmd_io() can return -EAGAIN first from nvme_alloc_user_request() if all tags in the tag set are in use. This ordering difference is observable when using UBLK_U_IO_{,UN}REGISTER_IO_BUF SQEs to modify the fixed buffer table. If the NVMe passthru operation is followed by UBLK_U_IO_UNREGISTER_IO_BUF to unregister the fixed buffer and the NVMe passthru goes async, the fixed buffer lookup will fail because it happens after the unregister. Userspace should not depend on the order in which io_uring issues SQEs submitted in parallel, but it may try submitting the SQEs together and fall back on a slow path if the fixed buffer lookup fails. To make the fast path more likely, do the import before nvme_alloc_user_request(). Signed-off-by: Caleb Sander Mateos <csander@purestorage.com> Reviewed-by: Jens Axboe <axboe@kernel.dk> Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com> Signed-off-by: Keith Busch <kbusch@kernel.org>
1 parent cd683de commit 38808af

File tree

1 file changed

+24
-21
lines changed

1 file changed

+24
-21
lines changed

drivers/nvme/host/ioctl.c

Lines changed: 24 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -114,8 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
114114

115115
static int nvme_map_user_request(struct request *req, u64 ubuffer,
116116
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
117-
struct io_uring_cmd *ioucmd, unsigned int flags,
118-
unsigned int iou_issue_flags)
117+
struct iov_iter *iter, unsigned int flags)
119118
{
120119
struct request_queue *q = req->q;
121120
struct nvme_ns *ns = q->queuedata;
@@ -137,24 +136,12 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
137136
"using unchecked metadata buffer\n");
138137
}
139138

140-
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
141-
struct iov_iter iter;
142-
143-
/* fixedbufs is only for non-vectored io */
144-
if (flags & NVME_IOCTL_VEC)
145-
return -EINVAL;
146-
147-
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
148-
rq_data_dir(req), &iter, ioucmd,
149-
iou_issue_flags);
150-
if (ret < 0)
151-
return ret;
152-
ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
153-
} else {
139+
if (iter)
140+
ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
141+
else
154142
ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
155143
bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
156144
0, rq_data_dir(req));
157-
}
158145

159146
if (ret)
160147
return ret;
@@ -196,7 +183,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
196183
req->timeout = timeout;
197184
if (ubuffer && bufflen) {
198185
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
199-
meta_len, NULL, flags, 0);
186+
meta_len, NULL, flags);
200187
if (ret)
201188
goto out_free_req;
202189
}
@@ -468,6 +455,8 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
468455
struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
469456
struct nvme_uring_data d;
470457
struct nvme_command c;
458+
struct iov_iter iter;
459+
struct iov_iter *map_iter = NULL;
471460
struct request *req;
472461
blk_opf_t rq_flags = REQ_ALLOC_CACHE;
473462
blk_mq_req_flags_t blk_flags = 0;
@@ -503,6 +492,20 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
503492
d.metadata_len = READ_ONCE(cmd->metadata_len);
504493
d.timeout_ms = READ_ONCE(cmd->timeout_ms);
505494

495+
if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
496+
/* fixedbufs is only for non-vectored io */
497+
if (vec)
498+
return -EINVAL;
499+
500+
ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
501+
nvme_is_write(&c) ? WRITE : READ, &iter, ioucmd,
502+
issue_flags);
503+
if (ret < 0)
504+
return ret;
505+
506+
map_iter = &iter;
507+
}
508+
506509
if (issue_flags & IO_URING_F_NONBLOCK) {
507510
rq_flags |= REQ_NOWAIT;
508511
blk_flags = BLK_MQ_REQ_NOWAIT;
@@ -516,9 +519,9 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
516519
req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
517520

518521
if (d.data_len) {
519-
ret = nvme_map_user_request(req, d.addr,
520-
d.data_len, nvme_to_user_ptr(d.metadata),
521-
d.metadata_len, ioucmd, vec, issue_flags);
522+
ret = nvme_map_user_request(req, d.addr, d.data_len,
523+
nvme_to_user_ptr(d.metadata), d.metadata_len,
524+
map_iter, vec);
522525
if (ret)
523526
goto out_free_req;
524527
}

0 commit comments

Comments (0)