
Commit b712075

Merge tag 'io_uring-6.7-2023-11-10' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
 "Mostly just a few fixes and cleanups caused by the read multishot
  support. Outside of that, a stable fix for how a connect retry is
  done"

* tag 'io_uring-6.7-2023-11-10' of git://git.kernel.dk/linux:
  io_uring: do not clamp read length for multishot read
  io_uring: do not allow multishot read to set addr or len
  io_uring: indicate if io_kbuf_recycle did recycle anything
  io_uring/rw: add separate prep handler for fixed read/write
  io_uring/rw: add separate prep handler for readv/writev
  io_uring/net: ensure socket is marked connected on connect retry
  io_uring/rw: don't attempt to allocate async data if opcode doesn't need it
2 parents: 4b80378 + e537592
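
For context, the multishot-read flow that most of these fixes touch looks roughly like this from userspace. This is a minimal sketch, not part of the commit: it assumes liburing 2.5+ for io_uring_prep_read_multishot() and io_uring_setup_buf_ring(), and the buffer count, buffer size, group id, and the read_multishot() helper name are all illustrative choices. Note the zero length in the prep call: the fixes below reject a nonzero addr/len for multishot reads and stop clamping later completions to a stale length.

#include <liburing.h>

#define NBUFS	8
#define BUFSZ	4096
#define BGID	0

/* Illustrative only: arm a multishot read backed by a provided buffer ring. */
static int read_multishot(struct io_uring *ring, int fd)
{
	static char bufs[NBUFS][BUFSZ];
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i, ret;

	/* register a buffer group and fill it with NBUFS buffers */
	br = io_uring_setup_buf_ring(ring, NBUFS, BGID, 0, &ret);
	if (!br)
		return ret;
	for (i = 0; i < NBUFS; i++)
		io_uring_buf_ring_add(br, bufs[i], BUFSZ, i,
				      io_uring_buf_ring_mask(NBUFS), i);
	io_uring_buf_ring_advance(br, NBUFS);

	sqe = io_uring_get_sqe(ring);
	/* nbytes is 0: the kernel picks the size from the selected buffer */
	io_uring_prep_read_multishot(sqe, fd, 0, 0, BGID);
	io_uring_submit(ring);

	for (;;) {
		int more;

		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret)
			return ret;
		/* cqe->res is the byte count; the buffer id sits in cqe->flags */
		more = cqe->flags & IORING_CQE_F_MORE;
		io_uring_cqe_seen(ring, cqe);
		if (!more)
			break;	/* terminal CQE (error or end) */
	}
	return 0;
}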

File tree

6 files changed: +80 -45 lines

io_uring/kbuf.c

Lines changed: 3 additions & 3 deletions
@@ -52,7 +52,7 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
 
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
@@ -65,7 +65,7 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	 * multiple use.
 	 */
 	if (req->flags & REQ_F_PARTIAL_IO)
-		return;
+		return false;
 
 	io_ring_submit_lock(ctx, issue_flags);
 
@@ -76,7 +76,7 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	req->buf_index = buf->bgid;
 
 	io_ring_submit_unlock(ctx, issue_flags);
-	return;
+	return true;
 }
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)

io_uring/kbuf.h

Lines changed: 8 additions & 5 deletions
@@ -53,11 +53,11 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
 
 void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
 
-static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
 {
 	/*
 	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
@@ -80,8 +80,10 @@ static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
 		} else {
 			req->buf_index = req->buf_list->bgid;
 			req->flags &= ~REQ_F_BUFFER_RING;
+			return true;
 		}
 	}
+	return false;
 }
 
 static inline bool io_do_buffer_select(struct io_kiocb *req)
@@ -91,12 +93,13 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
 	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
 }
 
-static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
 	if (req->flags & REQ_F_BUFFER_SELECTED)
-		io_kbuf_recycle_legacy(req, issue_flags);
+		return io_kbuf_recycle_legacy(req, issue_flags);
 	if (req->flags & REQ_F_BUFFER_RING)
-		io_kbuf_recycle_ring(req);
+		return io_kbuf_recycle_ring(req);
+	return false;
 }
 
 static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
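
The point of the new return value is that recycling is skipped for partial IO, so callers can now tell whether the buffer really went back to the pool; the rw.c hunk further down uses this to decide whether a cached length should be reset. For what "back to the pool" pairs with on the userspace side of a buffer ring, here is a hedged liburing sketch (the replenish() name and the 4096-byte size are assumptions, not from this commit):

#include <liburing.h>

/*
 * Illustrative sketch: the userspace mirror of kernel-side recycling.
 * After consuming a completion that carried a ring-provided buffer,
 * return that buffer to the ring so the kernel can select it again.
 * Assumes 'br' came from io_uring_setup_buf_ring() and 'bufs' backs
 * every buffer id in the group.
 */
static void replenish(struct io_uring_buf_ring *br, char (*bufs)[4096],
		      unsigned short bid, unsigned int entries)
{
	io_uring_buf_ring_add(br, bufs[bid], 4096, bid,
			      io_uring_buf_ring_mask(entries), 0);
	/* publish the update so the kernel sees the buffer */
	io_uring_buf_ring_advance(br, 1);
}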

io_uring/net.c

Lines changed: 11 additions & 13 deletions
@@ -1461,16 +1461,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
-	if (connect->in_progress) {
-		struct socket *socket;
-
-		ret = -ENOTSOCK;
-		socket = sock_from_file(req->file);
-		if (socket)
-			ret = sock_error(socket->sk);
-		goto out;
-	}
-
 	if (req_has_async_data(req)) {
 		io = req->async_data;
 	} else {
@@ -1490,9 +1480,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 	    && force_nonblock) {
 		if (ret == -EINPROGRESS) {
 			connect->in_progress = true;
-			return -EAGAIN;
-		}
-		if (ret == -ECONNABORTED) {
+		} else if (ret == -ECONNABORTED) {
 			if (connect->seen_econnaborted)
 				goto out;
 			connect->seen_econnaborted = true;
@@ -1506,6 +1494,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 		memcpy(req->async_data, &__io, sizeof(__io));
 		return -EAGAIN;
 	}
+	if (connect->in_progress) {
+		/*
+		 * At least bluetooth will return -EBADFD on a re-connect
+		 * attempt, and it's (supposedly) also valid to get -EISCONN
+		 * which means the previous result is good. For both of these,
+		 * grab the sock_error() and use that for the completion.
+		 */
+		if (ret == -EBADFD || ret == -EISCONN)
+			ret = sock_error(sock_from_file(req->file)->sk);
+	}
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;
 out:
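
From userspace the fix is visible only in the completion: a connect that first returns -EINPROGRESS is retried internally, and the CQE now carries the sock_error() result with the socket properly marked connected, instead of leaking -EBADFD or -EISCONN from the re-issue. A minimal sketch of issuing such a connect (assuming an initialized ring; submit_connect() is an illustrative name):

#include <liburing.h>
#include <sys/socket.h>
#include <netinet/in.h>

/*
 * Illustrative only: queue a connect through io_uring. The retry on
 * -EINPROGRESS happens inside the kernel; userspace just reads the
 * final result from cqe->res.
 */
static int submit_connect(struct io_uring *ring, int sockfd,
			  const struct sockaddr_in *addr)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;	/* SQ ring full; submit and retry */
	io_uring_prep_connect(sqe, sockfd, (const struct sockaddr *)addr,
			      sizeof(*addr));
	return io_uring_submit(ring);
}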

io_uring/opdef.c

Lines changed: 4 additions & 4 deletions
@@ -66,7 +66,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.iopoll = 1,
 		.iopoll_queue = 1,
 		.vectored = 1,
-		.prep = io_prep_rw,
+		.prep = io_prep_rwv,
 		.issue = io_read,
 	},
 	[IORING_OP_WRITEV] = {
@@ -80,7 +80,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.iopoll = 1,
 		.iopoll_queue = 1,
 		.vectored = 1,
-		.prep = io_prep_rw,
+		.prep = io_prep_rwv,
 		.issue = io_write,
 	},
 	[IORING_OP_FSYNC] = {
@@ -98,7 +98,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.ioprio = 1,
 		.iopoll = 1,
 		.iopoll_queue = 1,
-		.prep = io_prep_rw,
+		.prep = io_prep_rw_fixed,
 		.issue = io_read,
 	},
 	[IORING_OP_WRITE_FIXED] = {
@@ -111,7 +111,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.ioprio = 1,
 		.iopoll = 1,
 		.iopoll_queue = 1,
-		.prep = io_prep_rw,
+		.prep = io_prep_rw_fixed,
 		.issue = io_write,
 	},
 	[IORING_OP_POLL_ADD] = {
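
The table now routes IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED through io_prep_rw_fixed, which (per the rw.c diff below) validates buf_index against the registered-buffer table. A hedged userspace sketch of the pattern being validated (fixed_read() is an illustrative name; error handling is abbreviated):

#include <liburing.h>

/*
 * Illustrative only: register one buffer, then read into it with a
 * fixed-buffer read. buf_index 0 in the prep call selects the buffer
 * registered below; an out-of-range index now fails prep with -EFAULT.
 */
static int fixed_read(struct io_uring *ring, int fd, struct iovec *iov)
{
	struct io_uring_sqe *sqe;
	int ret;

	ret = io_uring_register_buffers(ring, iov, 1);
	if (ret)
		return ret;
	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	io_uring_prep_read_fixed(sqe, fd, iov->iov_base, iov->iov_len, 0, 0);
	return io_uring_submit(ring);
}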

io_uring/rw.c

Lines changed: 52 additions & 20 deletions
@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	/* used for fixed read/write too - just read unconditionally */
 	req->buf_index = READ_ONCE(sqe->buf_index);
 
-	if (req->opcode == IORING_OP_READ_FIXED ||
-	    req->opcode == IORING_OP_WRITE_FIXED) {
-		struct io_ring_ctx *ctx = req->ctx;
-		u16 index;
-
-		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
-			return -EFAULT;
-		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
-		req->imu = ctx->user_bufs[index];
-		io_req_set_rsrc_node(req, ctx, 0);
-	}
-
 	ioprio = READ_ONCE(sqe->ioprio);
 	if (ioprio) {
 		ret = ioprio_check_cap(ioprio);
@@ -110,16 +98,42 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	rw->addr = READ_ONCE(sqe->addr);
 	rw->len = READ_ONCE(sqe->len);
 	rw->flags = READ_ONCE(sqe->rw_flags);
+	return 0;
+}
+
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	int ret;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
 
-	/* Have to do this validation here, as this is in io_read() rw->len might
-	 * have chanaged due to buffer selection
+	/*
+	 * Have to do this validation here, as this is in io_read() rw->len
+	 * might have chanaged due to buffer selection
 	 */
-	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
-		ret = io_iov_buffer_select_prep(req);
-		if (ret)
-			return ret;
-	}
+	if (req->flags & REQ_F_BUFFER_SELECT)
+		return io_iov_buffer_select_prep(req);
+
+	return 0;
+}
 
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	u16 index;
+	int ret;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
+
+	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+		return -EFAULT;
+	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+	req->imu = ctx->user_bufs[index];
+	io_req_set_rsrc_node(req, ctx, 0);
 	return 0;
 }
 
@@ -129,12 +143,20 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  */
 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	int ret;
 
+	/* must be used with provided buffers */
+	if (!(req->flags & REQ_F_BUFFER_SELECT))
+		return -EINVAL;
+
 	ret = io_prep_rw(req, sqe);
 	if (unlikely(ret))
 		return ret;
 
+	if (rw->addr || rw->len)
+		return -EINVAL;
+
 	req->flags |= REQ_F_APOLL_MULTISHOT;
 	return 0;
 }
@@ -542,6 +564,9 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 {
 	if (!force && !io_cold_defs[req->opcode].prep_async)
 		return 0;
+	/* opcode type doesn't need async data */
+	if (!io_cold_defs[req->opcode].async_size)
+		return 0;
 	if (!req_has_async_data(req)) {
 		struct io_async_rw *iorw;
 
@@ -887,6 +912,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 {
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	unsigned int cflags = 0;
 	int ret;
 
@@ -903,7 +929,12 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	 * handling arm it.
 	 */
 	if (ret == -EAGAIN) {
-		io_kbuf_recycle(req, issue_flags);
+		/*
+		 * Reset rw->len to 0 again to avoid clamping future mshot
+		 * reads, in case the buffer size varies.
+		 */
+		if (io_kbuf_recycle(req, issue_flags))
+			rw->len = 0;
 		return -EAGAIN;
 	}
 
@@ -916,6 +947,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 	 * jump to the termination path. This request is then done.
 	 */
 	cflags = io_put_kbuf(req, issue_flags);
+	rw->len = 0; /* similarly to above, reset len to 0 */
 
 	if (io_fill_cqe_req_aux(req,
 				issue_flags & IO_URING_F_COMPLETE_DEFER,
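
The net effect of the two new prep checks is a hard contract on the multishot-read SQE: addr and len must be zero and a provided-buffer group is mandatory. Filling such an SQE by hand would look roughly like this sketch ('sqe', 'fd', and 'bgid' are assumed to come from the surrounding program):

#include <linux/io_uring.h>
#include <string.h>

/*
 * Illustrative only: an SQE that satisfies the contract
 * io_read_mshot_prep enforces in the diff above.
 */
static void prep_read_multishot_raw(struct io_uring_sqe *sqe, int fd,
				    unsigned short bgid)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ_MULTISHOT;
	sqe->fd = fd;
	sqe->addr = 0;	/* must be 0, -EINVAL otherwise */
	sqe->len = 0;	/* must be 0: sizes come from the buffer group */
	sqe->flags = IOSQE_BUFFER_SELECT;	/* REQ_F_BUFFER_SELECT is mandatory */
	sqe->buf_group = bgid;
}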

io_uring/rw.h

Lines changed: 2 additions & 0 deletions
@@ -16,6 +16,8 @@ struct io_async_rw {
 };
 
 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_read(struct io_kiocb *req, unsigned int issue_flags);
 int io_readv_prep_async(struct io_kiocb *req);
 int io_write(struct io_kiocb *req, unsigned int issue_flags);
