
Commit 560c3b5

axboe authored and gregkh committed
io_uring/kbuf: flag partial buffer mappings
A previous commit aborted mapping more for a non-incremental ring for
bundle peeking, but depending on where in the process this peeking
happened, it would not necessarily prevent a retry by the user. That can
create gaps in the received/read data.

Add struct buf_sel_arg->partial_map, which can pass this information
back. The networking side can then map that to internal state and use it
to gate retry as well.

Since this necessitates a new flag, change io_sr_msg->retry to a
retry_flags member, and store both the retry and partial map condition
in there.

Cc: stable@vger.kernel.org
Fixes: 26ec15e ("io_uring/kbuf: don't truncate end buffer for multiple buffer peeks")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit 178b8ff)
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent c8d152b commit 560c3b5
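In outline, the fix threads one bit of state through three layers: the buffer-peek code records that a non-incremental ring was truncated at max_len, the receive path folds that into the request's retry_flags, and the existing "no retry flags set" test then refuses the retry that would otherwise leave a gap in the received data. The stand-alone sketch below (simplified userspace C with stand-in types; only the enum values and the partial_map/retry_flags names come from the diff below, everything else is illustrative) shows that flow:

#include <stdbool.h>

enum sr_retry_flags {
	IO_SR_MSG_RETRY		= 1,
	IO_SR_MSG_PARTIAL_MAP	= 2,
};

struct sel_arg_sketch {			/* stands in for struct buf_sel_arg */
	unsigned short partial_map;
};

struct sr_msg_sketch {			/* stands in for struct io_sr_msg */
	unsigned short retry_flags;
};

/* Peek side: a non-incremental ring truncated at max_len reports a
 * partial mapping back through the selection arguments. */
static void peek_truncated(struct sel_arg_sketch *arg, bool incremental_ring)
{
	if (!incremental_ring)
		arg->partial_map = 1;
}

/* Receive side: fold the peek result into the request's retry flags. */
static void buffers_selected(struct sr_msg_sketch *sr,
			     const struct sel_arg_sketch *arg)
{
	if (arg->partial_map)
		sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
}

/* Completion side: retry-and-append is allowed only when no flag is set,
 * so a partial mapping now blocks the retry just like an already-pending
 * retry does. */
static bool may_retry_bundle(const struct sr_msg_sketch *sr,
			     bool more_data, bool full_transfer)
{
	return !sr->retry_flags && more_data && full_transfer;
}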

File tree

io_uring/kbuf.c
io_uring/kbuf.h
io_uring/net.c

3 files changed: +17, -8 lines

io_uring/kbuf.c

Lines changed: 1 addition & 0 deletions
@@ -263,6 +263,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
 		if (len > arg->max_len) {
 			len = arg->max_len;
 			if (!(bl->flags & IOBL_INC)) {
+				arg->partial_map = 1;
 				if (iov != arg->iovs)
 					break;
 				buf->len = len;

io_uring/kbuf.h

Lines changed: 1 addition & 0 deletions
@@ -61,6 +61,7 @@ struct buf_sel_arg {
 	size_t max_len;
 	unsigned short nr_iovs;
 	unsigned short mode;
+	unsigned short partial_map;
 };
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,

io_uring/net.c

Lines changed: 15 additions & 8 deletions
@@ -76,13 +76,18 @@ struct io_sr_msg {
 	/* initialised and used only by !msg send variants */
 	u16				addr_len;
 	u16				buf_group;
-	bool				retry;
+	unsigned short			retry_flags;
 	void __user			*addr;
 	void __user			*msg_control;
 	/* used only for send zerocopy */
 	struct io_kiocb			*notif;
 };
 
+enum sr_retry_flags {
+	IO_SR_MSG_RETRY			= 1,
+	IO_SR_MSG_PARTIAL_MAP		= 2,
+};
+
 /*
  * Number of times we'll try and do receives if there's more data. If we
  * exceed this limit, then add us to the back of the queue and retry from
@@ -204,7 +209,7 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
 
 	req->flags &= ~REQ_F_BL_EMPTY;
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 	sr->len = 0; /* get from the provided buffer */
 	req->buf_index = sr->buf_group;
 }
@@ -411,7 +416,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 
 	if (req->opcode == IORING_OP_SEND) {
 		if (READ_ONCE(sqe->__pad3[0]))
@@ -783,7 +788,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
-	sr->retry = false;
+	sr->retry_flags = 0;
 
 	if (unlikely(sqe->file_index || sqe->addr2))
 		return -EINVAL;
@@ -856,7 +861,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 
 		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
 				      issue_flags);
-		if (sr->retry)
+		if (sr->retry_flags & IO_SR_MSG_RETRY)
 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
 		/* bundle with no more immediate buffers, we're done */
 		if (req->flags & REQ_F_BL_EMPTY)
@@ -865,12 +870,12 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		 * If more is available AND it was a full transfer, retry and
 		 * append to this one
 		 */
-		if (!sr->retry && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
+		if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
 		    !iov_iter_count(&kmsg->msg.msg_iter)) {
 			req->cqe.flags = cflags & ~CQE_F_MASK;
 			sr->len = kmsg->msg.msg_inq;
 			sr->done_io += this_ret;
-			sr->retry = true;
+			sr->retry_flags |= IO_SR_MSG_RETRY;
 			return false;
 		}
 	} else {
@@ -1123,6 +1128,8 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 		kmsg->free_iov = arg.iovs;
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
+	if (arg.partial_map)
+		sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
 
 	/* special case 1 vec, can be a fast path */
 	if (ret == 1) {
@@ -1252,7 +1259,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_kiocb *notif;
 
 	zc->done_io = 0;
-	zc->retry = false;
+	zc->retry_flags = 0;
 	req->flags |= REQ_F_POLL_NO_LAZY;
 
 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
