Commit 2ce7592

Merge tag 'io_uring-6.2-2023-01-13' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
 "A fix for a regression that happened last week, rest is fixes that
  will be headed to stable as well. In detail:

   - Fix for a regression added with the leak fix from last week (me)

   - In writing a test case for that leak, inadvertently discovered a
     case where a poll request can race. So fix that up and mark it for
     stable, and also ensure that fdinfo covers both the poll tables
     that we have. The latter was an oversight when the split poll
     tables were added (me)

   - Fix for a lockdep reported issue with IOPOLL (Pavel)"

* tag 'io_uring-6.2-2023-01-13' of git://git.kernel.dk/linux:
  io_uring: lock overflowing for IOPOLL
  io_uring/poll: attempt request issue after racy poll wakeup
  io_uring/fdinfo: include locked hash table in fdinfo output
  io_uring/poll: add hash if ready poll request can't complete inline
  io_uring/io-wq: only free worker if it was allocated for creation
2 parents 9e058c2 + 544d163

4 files changed: +53 -20 lines changed

io_uring/fdinfo.c

Lines changed: 10 additions & 2 deletions
@@ -170,21 +170,29 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                 xa_for_each(&ctx->personalities, index, cred)
                         io_uring_show_cred(m, index, cred);
         }
-        if (has_lock)
-                mutex_unlock(&ctx->uring_lock);
 
         seq_puts(m, "PollList:\n");
         for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
                 struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
+                struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
                 struct io_kiocb *req;
 
                 spin_lock(&hb->lock);
                 hlist_for_each_entry(req, &hb->list, hash_node)
                         seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
                                         task_work_pending(req->task));
                 spin_unlock(&hb->lock);
+
+                if (!has_lock)
+                        continue;
+                hlist_for_each_entry(req, &hbl->list, hash_node)
+                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
+                                        task_work_pending(req->task));
         }
 
+        if (has_lock)
+                mutex_unlock(&ctx->uring_lock);
+
         seq_puts(m, "CqOverflowList:\n");
         spin_lock(&ctx->completion_lock);
         list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
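
The shape of this fix: the cancel_table buckets have their own per-bucket spinlocks and are always safe to walk, while cancel_table_locked is only safe under uring_lock, so the unlock is moved below the loop and the locked table is skipped when the mutex wasn't acquired. Below is a minimal userspace C sketch of the same pattern, walking one table unconditionally and the other only behind a trylocked mutex, mirroring the trylock that sets has_lock in fdinfo so a debug read never blocks. All names (show_tables, big_lock, plain_table) are illustrative, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
        int op;
        struct entry *next;
};

/* Stand-ins for the two hash tables; names are illustrative. */
static struct entry *plain_table;       /* per-bucket spinlock in the kernel */
static struct entry *locked_table;      /* guarded by big_lock */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void show_tables(void)
{
        /* trylock, like fdinfo: a diagnostic read must never block */
        bool has_lock = pthread_mutex_trylock(&big_lock) == 0;

        for (struct entry *e = plain_table; e; e = e->next)
                printf("  op=%d\n", e->op);

        /* the second table is only safe to walk while holding the lock */
        if (has_lock) {
                for (struct entry *e = locked_table; e; e = e->next)
                        printf("  op=%d (locked table)\n", e->op);
                pthread_mutex_unlock(&big_lock);
        }
}

int main(void)
{
        struct entry a = { .op = 1, .next = NULL };
        struct entry b = { .op = 2, .next = NULL };

        plain_table = &a;
        locked_table = &b;
        show_tables();
        return 0;
}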

io_uring/io-wq.c

Lines changed: 6 additions & 1 deletion
@@ -1230,7 +1230,12 @@ static void io_wq_cancel_tw_create(struct io_wq *wq)
 
                 worker = container_of(cb, struct io_worker, create_work);
                 io_worker_cancel_cb(worker);
-                kfree(worker);
+                /*
+                 * Only the worker continuation helper has worker allocated and
+                 * hence needs freeing.
+                 */
+                if (cb->func == create_worker_cont)
+                        kfree(worker);
         }
 }
 
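The ownership rule this encodes: more than one task_work callback can be pending on a worker's create_work, but only the create_worker_cont path runs with a worker that was heap-allocated purely for creation; on the other path the worker is already live and owned elsewhere, so the old unconditional kfree() risked a double free. A minimal userspace C model of "free only if the pending callback is the allocating one" follows; it compresses the kernel's callback_head indirection into a single function pointer, and everything here is an illustrative sketch, not kernel code.

#include <stdlib.h>

struct worker {
        void (*func)(struct worker *);  /* pending callback, if any */
};

/* Continuation of a failed worker creation: the only path whose
 * worker was allocated just for this callback. */
static void create_worker_cont(struct worker *w) { (void)w; }

/* Callback used while a worker already exists and is owned elsewhere. */
static void io_workqueue_create(struct worker *w) { (void)w; }

static void cancel_pending(struct worker *w)
{
        /*
         * Mirror of the fix: free only when the pending callback is the
         * allocating one; otherwise the worker belongs to another owner
         * and freeing it here would become a double free later.
         */
        if (w->func == create_worker_cont)
                free(w);
}

int main(void)
{
        struct worker *fresh = malloc(sizeof(*fresh));
        fresh->func = create_worker_cont;
        cancel_pending(fresh);          /* freed: allocated for creation */

        struct worker live = { .func = io_workqueue_create };
        cancel_pending(&live);          /* untouched: not this path's to free */
        return 0;
}
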
io_uring/poll.c

Lines changed: 32 additions & 16 deletions
@@ -223,21 +223,22 @@ enum {
         IOU_POLL_DONE = 0,
         IOU_POLL_NO_ACTION = 1,
         IOU_POLL_REMOVE_POLL_USE_RES = 2,
+        IOU_POLL_REISSUE = 3,
 };
 
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
  *
- * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action require,
- * which is either spurious wakeup or multishot CQE is served.
- * IOU_POLL_DONE when it's done with the request, then the mask is stored in req->cqe.res.
- * IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot poll and that the result
- * is stored in req->cqe.
+ * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
+ * require, which is either spurious wakeup or multishot CQE is served.
+ * IOU_POLL_DONE when it's done with the request, then the mask is stored in
+ * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
+ * poll and that the result is stored in req->cqe.
  */
 static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 {
-        int v, ret;
+        int v;
 
         /* req->task == current here, checking PF_EXITING is safe */
         if (unlikely(req->task->flags & PF_EXITING))
@@ -276,10 +277,15 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                         if (!req->cqe.res) {
                                 struct poll_table_struct pt = { ._key = req->apoll_events };
                                 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
+                                /*
+                                 * We got woken with a mask, but someone else got to
+                                 * it first. The above vfs_poll() doesn't add us back
+                                 * to the waitqueue, so if we get nothing back, we
+                                 * should be safe and attempt a reissue.
+                                 */
+                                if (unlikely(!req->cqe.res))
+                                        return IOU_POLL_REISSUE;
                         }
-
-                        if ((unlikely(!req->cqe.res)))
-                                continue;
                         if (req->apoll_events & EPOLLONESHOT)
                                 return IOU_POLL_DONE;
 
@@ -294,7 +300,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                                 return IOU_POLL_REMOVE_POLL_USE_RES;
                         }
                 } else {
-                        ret = io_poll_issue(req, locked);
+                        int ret = io_poll_issue(req, locked);
                         if (ret == IOU_STOP_MULTISHOT)
                                 return IOU_POLL_REMOVE_POLL_USE_RES;
                         if (ret < 0)
@@ -330,6 +336,9 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 
                 poll = io_kiocb_to_cmd(req, struct io_poll);
                 req->cqe.res = mangle_poll(req->cqe.res & poll->events);
+        } else if (ret == IOU_POLL_REISSUE) {
+                io_req_task_submit(req, locked);
+                return;
         } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
                 req->cqe.res = ret;
                 req_set_fail(req);
@@ -342,7 +351,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 
         if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                 io_req_task_complete(req, locked);
-        else if (ret == IOU_POLL_DONE)
+        else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
                 io_req_task_submit(req, locked);
         else
                 io_req_defer_failed(req, ret);
@@ -533,6 +542,14 @@ static bool io_poll_can_finish_inline(struct io_kiocb *req,
         return pt->owning || io_poll_get_ownership(req);
 }
 
+static void io_poll_add_hash(struct io_kiocb *req)
+{
+        if (req->flags & REQ_F_HASH_LOCKED)
+                io_poll_req_insert_locked(req);
+        else
+                io_poll_req_insert(req);
+}
+
 /*
  * Returns 0 when it's handed over for polling. The caller owns the requests if
  * it returns non-zero, but otherwise should not touch it. Negative values
@@ -591,18 +608,17 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
         if (mask &&
            ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
-                if (!io_poll_can_finish_inline(req, ipt))
+                if (!io_poll_can_finish_inline(req, ipt)) {
+                        io_poll_add_hash(req);
                         return 0;
+                }
                 io_poll_remove_entries(req);
                 ipt->result_mask = mask;
                 /* no one else has access to the req, forget about the ref */
                 return 1;
         }
 
-        if (req->flags & REQ_F_HASH_LOCKED)
-                io_poll_req_insert_locked(req);
-        else
-                io_poll_req_insert(req);
+        io_poll_add_hash(req);
 
         if (mask && (poll->events & EPOLLET) &&
             io_poll_can_finish_inline(req, ipt)) {
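
The race being handled: a waitqueue wakeup hands over a mask, but by the time the task_work runs another consumer has taken the events, and because the vfs_poll() above runs without a poll table it does not re-add the request to the waitqueue, so treating the empty result as "no action" would strand the request. The new IOU_POLL_REISSUE result routes it back through submission instead. Below is a minimal userspace C model of that three-way decision; the enum values and the check_events() helper are illustrative stand-ins, not the kernel's logic verbatim.

#include <stdio.h>

enum { POLL_DONE, POLL_NO_ACTION, POLL_REISSUE };

/* woken_mask: events the wakeup reported; repoll_mask: what a fresh
 * vfs_poll()-style check finds now. Illustrative model only. */
static int check_events(int woken_mask, int repoll_mask)
{
        if (!woken_mask)
                return POLL_NO_ACTION;  /* spurious wakeup, keep waiting */
        if (!repoll_mask)
                return POLL_REISSUE;    /* raced: events consumed, retry issue */
        return POLL_DONE;               /* events still present, complete */
}

int main(void)
{
        printf("race -> reissue: %s\n",
               check_events(0x1, 0x0) == POLL_REISSUE ? "yes" : "no");
        printf("no race -> done: %s\n",
               check_events(0x1, 0x1) == POLL_DONE ? "yes" : "no");
        return 0;
}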

io_uring/rw.c

Lines changed: 5 additions & 1 deletion
@@ -1062,7 +1062,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                         continue;
 
                 req->cqe.flags = io_put_kbuf(req, 0);
-                io_fill_cqe_req(req->ctx, req);
+                if (unlikely(!__io_fill_cqe_req(ctx, req))) {
+                        spin_lock(&ctx->completion_lock);
+                        io_req_cqe_overflow(req);
+                        spin_unlock(&ctx->completion_lock);
+                }
         }
 
         if (unlikely(!nr_events))
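
The lockdep complaint behind this one: the IOPOLL completion path posts CQEs without holding completion_lock, which is fine for the ring fast path, but the shared overflow list must only be touched under the lock, so the overflow slow path now takes it explicitly. A minimal userspace C sketch of that "lockless fast path, locked slow path" split follows; try_post_cqe() and the globals are illustrative assumptions, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static int overflow_count;

/* Pretend the CQ ring is full so the slow path is exercised. */
static bool try_post_cqe(void)
{
        return false;
}

static void post_completion(void)
{
        /* fast path: no lock needed to post into the ring itself */
        if (!try_post_cqe()) {
                /*
                 * Slow path: the shared overflow list must be mutated
                 * under the lock, even though this caller otherwise
                 * runs lockless.
                 */
                pthread_mutex_lock(&completion_lock);
                overflow_count++;
                pthread_mutex_unlock(&completion_lock);
        }
}

int main(void)
{
        post_completion();
        printf("overflowed completions: %d\n", overflow_count);
        return 0;
}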
