
Commit a984e23

Merge tag 'io_uring-6.13-20250103' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 - Fix an issue with the read multishot support and posting of CQEs
   from io-wq context

 - Fix a regression introduced in this cycle, where making the timeout
   lock a raw one uncovered another locking dependency. As a result,
   move the timeout flushing outside of the timeout lock, punting them
   to a local list first

 - Fix use of an uninitialized variable in io_async_msghdr. Doesn't
   really matter functionally, but silences a valid KMSAN complaint
   that it's not always initialized

 - Fix use of incrementally provided buffers for read on non-pollable
   files, where the buffer always gets committed upfront. Unfortunately
   the buffer address isn't resolved first, so the read ends up using
   the updated rather than the current value

* tag 'io_uring-6.13-20250103' of git://git.kernel.dk/linux:
  io_uring/kbuf: use pre-committed buffer address for non-pollable file
  io_uring/net: always initialize kmsg->msg.msg_inq upfront
  io_uring/timeout: flush timeouts outside of the timeout lock
  io_uring/rw: fix downgraded mshot read
2 parents: aba74e6 + ed123c9

4 files changed: +37 −15 lines changed


io_uring/kbuf.c

Lines changed: 3 additions & 1 deletion
@@ -139,6 +139,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	struct io_uring_buf_ring *br = bl->buf_ring;
 	__u16 tail, head = bl->head;
 	struct io_uring_buf *buf;
+	void __user *ret;
 
 	tail = smp_load_acquire(&br->tail);
 	if (unlikely(tail == head))
@@ -153,6 +154,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
+	ret = u64_to_user_ptr(buf->addr);
 
 	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
 		/*
@@ -168,7 +170,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 		io_kbuf_commit(req, bl, *len, 1);
 		req->buf_list = NULL;
 	}
-	return u64_to_user_ptr(buf->addr);
+	return ret;
 }
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
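Why capturing the address first matters: with incrementally consumed ring buffers, committing the buffer advances its address past the consumed bytes, so reading buf->addr after the upfront commit returns the updated value instead of the one this read should use. A minimal userspace model of that before/after behavior (simplified struct and a hypothetical commit() helper, not the kernel code):

#include <stdint.h>
#include <stdio.h>

struct buf { uint64_t addr; uint32_t len; };

/* Model of an incremental commit: consume 'len' bytes of the entry. */
static void commit(struct buf *b, uint32_t len)
{
	b->addr += len;
	b->len -= len;
}

int main(void)
{
	struct buf b = { .addr = 0x1000, .len = 4096 };
	uint64_t saved = b.addr;	/* the fix: resolve the address first */

	commit(&b, 512);	/* upfront commit, as for a non-pollable file */

	printf("post-commit addr: 0x%llx (what the buggy read used)\n",
	       (unsigned long long)b.addr);
	printf("saved addr:       0x%llx (what the read should use)\n",
	       (unsigned long long)saved);
	return 0;
}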

io_uring/net.c

Lines changed: 1 addition & 0 deletions
@@ -754,6 +754,7 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
 	if (req->opcode == IORING_OP_RECV) {
 		kmsg->msg.msg_name = NULL;
 		kmsg->msg.msg_namelen = 0;
+		kmsg->msg.msg_inq = 0;
 		kmsg->msg.msg_control = NULL;
 		kmsg->msg.msg_get_inq = 1;
 		kmsg->msg.msg_controllen = 0;
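The one-liner above is the whole fix: the surrounding setup initializes the other msghdr fields but skipped msg_inq on this path, which is exactly the shape of bug KMSAN reports. A contrived userspace sketch of that shape (hypothetical struct and names, not the kernel types):

#include <stdio.h>
#include <stdlib.h>

struct msg_model {
	void *msg_name;
	int msg_namelen;
	int msg_inq;	/* the field the real patch zeroes upfront */
};

static struct msg_model *prep(void)
{
	struct msg_model *m = malloc(sizeof(*m));	/* not zeroed */
	if (!m)
		return NULL;
	m->msg_name = NULL;
	m->msg_namelen = 0;
	/* bug shape: msg_inq was never written on this path, so a later
	 * read of it is uninitialized; the fix is this one store */
	m->msg_inq = 0;
	return m;
}

int main(void)
{
	struct msg_model *m = prep();
	if (m) {
		printf("msg_inq=%d\n", m->msg_inq);	/* defined only with the fix */
		free(m);
	}
	return 0;
}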

io_uring/rw.c

Lines changed: 2 additions & 0 deletions
@@ -983,6 +983,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		io_kbuf_recycle(req, issue_flags);
 		if (ret < 0)
 			req_set_fail(req);
+	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
+		cflags = io_put_kbuf(req, ret, issue_flags);
 	} else {
 		/*
 		 * Any successful return value will keep the multishot read
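The new branch covers a multishot read that has been downgraded to single-shot (REQ_F_APOLL_MULTISHOT cleared, as when the request is punted to io-wq): it must put its buffer once and complete like a plain read rather than taking the multishot re-arm path. A simplified userspace model of that three-way completion flow (flag name mirrors the kernel's, logic heavily condensed):

#include <stdio.h>

enum { REQ_F_APOLL_MULTISHOT = 1u << 0 };

/* 'ret' is the read result; 'flags' are the request flags. */
static void complete_read(int ret, unsigned int flags)
{
	if (ret <= 0) {
		printf("recycle buffer, fail the request if ret < 0\n");
	} else if (!(flags & REQ_F_APOLL_MULTISHOT)) {
		/* downgraded to single-shot: put the buffer once, finish */
		printf("put buffer, post final CQE\n");
	} else {
		/* still multishot: post a CQE and re-arm for more data */
		printf("post CQE, re-arm\n");
	}
}

int main(void)
{
	complete_read(128, 0);				/* downgraded path */
	complete_read(128, REQ_F_APOLL_MULTISHOT);	/* multishot path */
	return 0;
}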

io_uring/timeout.c

Lines changed: 31 additions & 14 deletions
@@ -85,29 +85,45 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 	io_req_task_complete(req, ts);
 }
 
-static bool io_kill_timeout(struct io_kiocb *req, int status)
+static __cold bool io_flush_killed_timeouts(struct list_head *list, int err)
+{
+	if (list_empty(list))
+		return false;
+
+	while (!list_empty(list)) {
+		struct io_timeout *timeout;
+		struct io_kiocb *req;
+
+		timeout = list_first_entry(list, struct io_timeout, list);
+		list_del_init(&timeout->list);
+		req = cmd_to_io_kiocb(timeout);
+		if (err)
+			req_set_fail(req);
+		io_req_queue_tw_complete(req, err);
+	}
+
+	return true;
+}
+
+static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
 	__must_hold(&req->ctx->timeout_lock)
 {
 	struct io_timeout_data *io = req->async_data;
 
 	if (hrtimer_try_to_cancel(&io->timer) != -1) {
 		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 
-		if (status)
-			req_set_fail(req);
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
-		list_del_init(&timeout->list);
-		io_req_queue_tw_complete(req, status);
-		return true;
+		list_move_tail(&timeout->list, list);
 	}
-	return false;
 }
 
 __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-	u32 seq;
 	struct io_timeout *timeout, *tmp;
+	LIST_HEAD(list);
+	u32 seq;
 
 	raw_spin_lock_irq(&ctx->timeout_lock);
 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
@@ -131,10 +147,11 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 		if (events_got < events_needed)
 			break;
 
-		io_kill_timeout(req, 0);
+		io_kill_timeout(req, &list);
 	}
 	ctx->cq_last_tm_flush = seq;
 	raw_spin_unlock_irq(&ctx->timeout_lock);
+	io_flush_killed_timeouts(&list, 0);
 }
 
 static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
@@ -661,7 +678,7 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
 			  bool cancel_all)
 {
 	struct io_timeout *timeout, *tmp;
-	int canceled = 0;
+	LIST_HEAD(list);
 
 	/*
 	 * completion_lock is needed for io_match_task(). Take it before
@@ -672,11 +689,11 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
 
-		if (io_match_task(req, tctx, cancel_all) &&
-		    io_kill_timeout(req, -ECANCELED))
-			canceled++;
+		if (io_match_task(req, tctx, cancel_all))
+			io_kill_timeout(req, &list);
 	}
 	raw_spin_unlock_irq(&ctx->timeout_lock);
 	spin_unlock(&ctx->completion_lock);
-	return canceled != 0;
+
+	return io_flush_killed_timeouts(&list, -ECANCELED);
 }
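The restructuring above is the classic collect-under-the-lock, complete-after-unlock pattern: io_kill_timeout() now only moves expired entries onto a caller-provided local list while the raw timeout_lock is held, and io_flush_killed_timeouts() runs the completions after the lock is dropped, so completion work never nests under the raw lock. A userspace sketch of the same pattern (a pthread mutex standing in for the raw spinlock, a simplified singly linked list instead of the kernel's list helpers):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct timeout {
	int id;
	struct timeout *next;
};

static pthread_mutex_t timeout_lock = PTHREAD_MUTEX_INITIALIZER;
static struct timeout *timeout_list;	/* protected by timeout_lock */

static void add_timeout(int id)
{
	struct timeout *t = malloc(sizeof(*t));
	t->id = id;
	pthread_mutex_lock(&timeout_lock);
	t->next = timeout_list;
	timeout_list = t;
	pthread_mutex_unlock(&timeout_lock);
}

/* Completion may do heavy work (take other locks, allocate, do I/O),
 * so it must never run while timeout_lock is held. */
static void complete(struct timeout *t, int err)
{
	printf("completing %d (err=%d)\n", t->id, err);
	free(t);
}

static void flush_timeouts(int err)
{
	struct timeout *local;

	pthread_mutex_lock(&timeout_lock);
	local = timeout_list;	/* detach onto a local list: O(1), lock-safe */
	timeout_list = NULL;
	pthread_mutex_unlock(&timeout_lock);

	while (local) {		/* heavy completion work, lock now dropped */
		struct timeout *t = local;
		local = local->next;
		complete(t, err);
	}
}

int main(void)
{
	add_timeout(1);
	add_timeout(2);
	flush_timeouts(0);
	return 0;
}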
