Skip to content

Commit b53e523

Browse files
committed
io_uring: always arm linked timeouts prior to issue
There are a few spots where linked timeouts are armed, and not all of them adhere to the pre-arm, attempt issue, post-arm pattern. This can be problematic if the linked request returns that it will trigger a callback later, and does so before the linked timeout is fully armed.

Consolidate all the linked timeout handling into __io_issue_sqe(), rather than have it spread throughout the various issue entry points.

Cc: stable@vger.kernel.org
Link: axboe/liburing#1390
Reported-by: Chase Hiltz <chase@path.net>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent f024d3a commit b53e523

File tree

1 file changed

+15
-35
lines changed

1 file changed

+15
-35
lines changed

io_uring/io_uring.c

Lines changed: 15 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -448,24 +448,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
448448
return req->link;
449449
}
450450

451-
static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
452-
{
453-
if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
454-
return NULL;
455-
return __io_prep_linked_timeout(req);
456-
}
457-
458-
static noinline void __io_arm_ltimeout(struct io_kiocb *req)
459-
{
460-
io_queue_linked_timeout(__io_prep_linked_timeout(req));
461-
}
462-
463-
static inline void io_arm_ltimeout(struct io_kiocb *req)
464-
{
465-
if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
466-
__io_arm_ltimeout(req);
467-
}
468-
469451
static void io_prep_async_work(struct io_kiocb *req)
470452
{
471453
const struct io_issue_def *def = &io_issue_defs[req->opcode];
@@ -518,7 +500,6 @@ static void io_prep_async_link(struct io_kiocb *req)
518500

519501
static void io_queue_iowq(struct io_kiocb *req)
520502
{
521-
struct io_kiocb *link = io_prep_linked_timeout(req);
522503
struct io_uring_task *tctx = req->tctx;
523504

524505
BUG_ON(!tctx);
@@ -543,8 +524,6 @@ static void io_queue_iowq(struct io_kiocb *req)
543524

544525
trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
545526
io_wq_enqueue(tctx->io_wq, &req->work);
546-
if (link)
547-
io_queue_linked_timeout(link);
548527
}
549528

550529
static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw)
@@ -1724,15 +1703,22 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
17241703
return !!req->file;
17251704
}
17261705

1706+
#define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
1707+
17271708
static inline int __io_issue_sqe(struct io_kiocb *req,
17281709
unsigned int issue_flags,
17291710
const struct io_issue_def *def)
17301711
{
17311712
const struct cred *creds = NULL;
1713+
struct io_kiocb *link = NULL;
17321714
int ret;
17331715

1734-
if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1735-
creds = override_creds(req->creds);
1716+
if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
1717+
if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
1718+
creds = override_creds(req->creds);
1719+
if (req->flags & REQ_F_ARM_LTIMEOUT)
1720+
link = __io_prep_linked_timeout(req);
1721+
}
17361722

17371723
if (!def->audit_skip)
17381724
audit_uring_entry(req->opcode);
@@ -1742,8 +1728,12 @@ static inline int __io_issue_sqe(struct io_kiocb *req,
17421728
if (!def->audit_skip)
17431729
audit_uring_exit(!ret, ret);
17441730

1745-
if (creds)
1746-
revert_creds(creds);
1731+
if (unlikely(creds || link)) {
1732+
if (creds)
1733+
revert_creds(creds);
1734+
if (link)
1735+
io_queue_linked_timeout(link);
1736+
}
17471737

17481738
return ret;
17491739
}
@@ -1769,7 +1759,6 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
17691759

17701760
if (ret == IOU_ISSUE_SKIP_COMPLETE) {
17711761
ret = 0;
1772-
io_arm_ltimeout(req);
17731762

17741763
/* If the op doesn't have a file, we're not polling for it */
17751764
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
@@ -1824,8 +1813,6 @@ void io_wq_submit_work(struct io_wq_work *work)
18241813
else
18251814
req_ref_get(req);
18261815

1827-
io_arm_ltimeout(req);
1828-
18291816
/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
18301817
if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
18311818
fail:
@@ -1941,15 +1928,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
19411928
static void io_queue_async(struct io_kiocb *req, int ret)
19421929
__must_hold(&req->ctx->uring_lock)
19431930
{
1944-
struct io_kiocb *linked_timeout;
1945-
19461931
if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
19471932
io_req_defer_failed(req, ret);
19481933
return;
19491934
}
19501935

1951-
linked_timeout = io_prep_linked_timeout(req);
1952-
19531936
switch (io_arm_poll_handler(req, 0)) {
19541937
case IO_APOLL_READY:
19551938
io_kbuf_recycle(req, 0);
@@ -1962,9 +1945,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
19621945
case IO_APOLL_OK:
19631946
break;
19641947
}
1965-
1966-
if (linked_timeout)
1967-
io_queue_linked_timeout(linked_timeout);
19681948
}
19691949

19701950
static inline void io_queue_sqe(struct io_kiocb *req)

0 commit comments

Comments (0)