Commit dfbe556

io_uring: flush offloaded and delayed task_work on exit
io_uring offloads task_work for cancelation purposes when the task is exiting. This is conceptually fine, but we should be nicer and actually wait for that work to complete before returning.

Add an argument to io_fallback_tw() telling it to flush the deferred work once it has all been queued up, and have it flush the previous ctx whenever the ctx changes while walking the queued requests.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent: 10e1c0d

File tree

1 file changed (+19, -3 lines)

io_uring/io_uring.c

Lines changed: 19 additions & 3 deletions
@@ -1237,18 +1237,32 @@ static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
         return cmpxchg(&head->first, old, new);
 }
 
-static __cold void io_fallback_tw(struct io_uring_task *tctx)
+static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 {
         struct llist_node *node = llist_del_all(&tctx->task_list);
+        struct io_ring_ctx *last_ctx = NULL;
         struct io_kiocb *req;
 
         while (node) {
                 req = container_of(node, struct io_kiocb, io_task_work.node);
                 node = node->next;
+                if (sync && last_ctx != req->ctx) {
+                        if (last_ctx) {
+                                flush_delayed_work(&last_ctx->fallback_work);
+                                percpu_ref_put(&last_ctx->refs);
+                        }
+                        last_ctx = req->ctx;
+                        percpu_ref_get(&last_ctx->refs);
+                }
                 if (llist_add(&req->io_task_work.node,
                               &req->ctx->fallback_llist))
                         schedule_delayed_work(&req->ctx->fallback_work, 1);
         }
+
+        if (last_ctx) {
+                flush_delayed_work(&last_ctx->fallback_work);
+                percpu_ref_put(&last_ctx->refs);
+        }
 }
 
 void tctx_task_work(struct callback_head *cb)
@@ -1263,7 +1277,7 @@ void tctx_task_work(struct callback_head *cb)
         unsigned int count = 0;
 
         if (unlikely(current->flags & PF_EXITING)) {
-                io_fallback_tw(tctx);
+                io_fallback_tw(tctx, true);
                 return;
         }
 
@@ -1358,7 +1372,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
         if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
                 return;
 
-        io_fallback_tw(tctx);
+        io_fallback_tw(tctx, false);
 }
 
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
@@ -3108,6 +3122,8 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
         if (ctx->rings)
                 io_kill_timeouts(ctx, NULL, true);
 
+        flush_delayed_work(&ctx->fallback_work);
+
         INIT_WORK(&ctx->exit_work, io_ring_exit_work);
         /*
          * Use system_unbound_wq to avoid spawning tons of event kworkers
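
For readers less familiar with the delayed-work API this fix builds on: schedule_delayed_work() only queues the handler to run later in a kworker, while flush_delayed_work() blocks until that handler has finished, which is what the exit paths above now rely on. The sketch below is illustrative only; the demo_* names are made up and not part of io_uring, and the real io_fallback_tw() additionally holds a percpu ref on each ctx across the flush so the ctx cannot be freed while its fallback work is being waited on.

#include <linux/workqueue.h>

/* Runs later in a kworker, analogous to a ctx's fallback_work handler. */
static void demo_fallback_fn(struct work_struct *work)
{
}

static DECLARE_DELAYED_WORK(demo_work, demo_fallback_fn);

static void demo_exit_path(void)
{
        /* Queue the handler to run after one tick, as io_fallback_tw() does. */
        schedule_delayed_work(&demo_work, 1);

        /*
         * Wait for the queued handler to complete before tearing state down;
         * without the flush, the work could still be running after exit.
         */
        flush_delayed_work(&demo_work);
}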
