Skip to content

Commit f688944

Browse files
committed
io_uring/rw: add separate prep handler for fixed read/write
Rather than sprinkle opcode checks in the generic read/write prep handler, have a separate prep handler for the fixed read/write operation. Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 0e984ec commit f688944

File tree

3 files changed

+21
-14
lines changed

3 files changed

+21
-14
lines changed

io_uring/opdef.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ const struct io_issue_def io_issue_defs[] = {
9898
.ioprio = 1,
9999
.iopoll = 1,
100100
.iopoll_queue = 1,
101-
.prep = io_prep_rw,
101+
.prep = io_prep_rw_fixed,
102102
.issue = io_read,
103103
},
104104
[IORING_OP_WRITE_FIXED] = {
@@ -111,7 +111,7 @@ const struct io_issue_def io_issue_defs[] = {
111111
.ioprio = 1,
112112
.iopoll = 1,
113113
.iopoll_queue = 1,
114-
.prep = io_prep_rw,
114+
.prep = io_prep_rw_fixed,
115115
.issue = io_write,
116116
},
117117
[IORING_OP_POLL_ADD] = {

io_uring/rw.c

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
8383
/* used for fixed read/write too - just read unconditionally */
8484
req->buf_index = READ_ONCE(sqe->buf_index);
8585

86-
if (req->opcode == IORING_OP_READ_FIXED ||
87-
req->opcode == IORING_OP_WRITE_FIXED) {
88-
struct io_ring_ctx *ctx = req->ctx;
89-
u16 index;
90-
91-
if (unlikely(req->buf_index >= ctx->nr_user_bufs))
92-
return -EFAULT;
93-
index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
94-
req->imu = ctx->user_bufs[index];
95-
io_req_set_rsrc_node(req, ctx, 0);
96-
}
97-
9886
ioprio = READ_ONCE(sqe->ioprio);
9987
if (ioprio) {
10088
ret = ioprio_check_cap(ioprio);
@@ -131,6 +119,24 @@ int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
131119
return 0;
132120
}
133121

122+
int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
123+
{
124+
struct io_ring_ctx *ctx = req->ctx;
125+
u16 index;
126+
int ret;
127+
128+
ret = io_prep_rw(req, sqe);
129+
if (unlikely(ret))
130+
return ret;
131+
132+
if (unlikely(req->buf_index >= ctx->nr_user_bufs))
133+
return -EFAULT;
134+
index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
135+
req->imu = ctx->user_bufs[index];
136+
io_req_set_rsrc_node(req, ctx, 0);
137+
return 0;
138+
}
139+
134140
/*
135141
* Multishot read is prepared just like a normal read/write request, only
136142
* difference is that we set the MULTISHOT flag.

io_uring/rw.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ struct io_async_rw {
1717

1818
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
1919
int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
20+
int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
2021
int io_read(struct io_kiocb *req, unsigned int issue_flags);
2122
int io_readv_prep_async(struct io_kiocb *req);
2223
int io_write(struct io_kiocb *req, unsigned int issue_flags);

0 commit comments

Comments
 (0)