
Commit 7380c60

Merge tag 'io_uring-6.15-20250509' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 - Fix for linked timeouts arming and firing wrt prep and issue of the
   request being managed by the linked timeout

 - Fix for a CQE ordering issue between requests with multishot and
   using the same buffer group. This is a dumbed down version for this
   release and for stable, it'll get improved for v6.16

 - Tweak the SQPOLL submit batch size. A previous commit made SQPOLL
   manage its own task_work and chose a tiny batch size, bump it from 8
   to 32 to fix a performance regression due to that

* tag 'io_uring-6.15-20250509' of git://git.kernel.dk/linux:
  io_uring/sqpoll: Increase task_work submission batch size
  io_uring: ensure deferred completions are flushed for multishot
  io_uring: always arm linked timeouts prior to issue
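
For readers less familiar with the first item: a linked timeout is a timeout SQE chained to the preceding request with IOSQE_IO_LINK, cancelling that request if it does not complete in time. The sketch below is a minimal userspace illustration using liburing and is not part of this merge; the pipe read and the one-second timeout are arbitrary example values.

/* Minimal liburing sketch: a read guarded by a linked timeout.
 * Illustrative only; the pipe fd and 1s timeout are arbitrary.
 * Build: cc example.c -luring
 */
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	char buf[64];
	int fds[2];

	if (pipe(fds) < 0 || io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* Read from the (empty) pipe; IOSQE_IO_LINK chains the next SQE to it. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* Linked timeout: fires if the read has not completed within 1s,
	 * in which case the read completes with -ECANCELED. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(&ring);

	/* Both the read and the timeout post a CQE. */
	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("cqe res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

The io_uring.c changes in this merge move the kernel-side arming of such timeouts so it happens before the request is issued (see __io_issue_sqe() in the diff below), rather than after issue via the removed io_arm_ltimeout() helpers.
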
2 parents 29fe5d5 + 92835ce commit 7380c60


2 files changed (+24, -36 lines)


io_uring/io_uring.c

Lines changed: 23 additions & 35 deletions
@@ -448,24 +448,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 	return req->link;
 }
 
-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
-	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
-		return NULL;
-	return __io_prep_linked_timeout(req);
-}
-
-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
-{
-	io_queue_linked_timeout(__io_prep_linked_timeout(req));
-}
-
-static inline void io_arm_ltimeout(struct io_kiocb *req)
-{
-	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
-		__io_arm_ltimeout(req);
-}
-
 static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
@@ -518,7 +500,6 @@ static void io_prep_async_link(struct io_kiocb *req)
 
 static void io_queue_iowq(struct io_kiocb *req)
 {
-	struct io_kiocb *link = io_prep_linked_timeout(req);
 	struct io_uring_task *tctx = req->tctx;
 
 	BUG_ON(!tctx);
@@ -543,8 +524,6 @@ static void io_queue_iowq(struct io_kiocb *req)
 
 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
 	io_wq_enqueue(tctx->io_wq, &req->work);
-	if (link)
-		io_queue_linked_timeout(link);
 }
 
 static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw)
@@ -869,6 +848,14 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	struct io_ring_ctx *ctx = req->ctx;
 	bool posted;
 
+	/*
+	 * If multishot has already posted deferred completions, ensure that
+	 * those are flushed first before posting this one. If not, CQEs
+	 * could get reordered.
+	 */
+	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+		__io_submit_flush_completions(ctx);
+
 	lockdep_assert(!io_wq_current_is_worker());
 	lockdep_assert_held(&ctx->uring_lock);
 
@@ -1724,15 +1711,22 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
 	return !!req->file;
 }
 
+#define REQ_ISSUE_SLOW_FLAGS	(REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
+
 static inline int __io_issue_sqe(struct io_kiocb *req,
 				 unsigned int issue_flags,
 				 const struct io_issue_def *def)
 {
 	const struct cred *creds = NULL;
+	struct io_kiocb *link = NULL;
 	int ret;
 
-	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
-		creds = override_creds(req->creds);
+	if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
+		if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
+			creds = override_creds(req->creds);
+		if (req->flags & REQ_F_ARM_LTIMEOUT)
+			link = __io_prep_linked_timeout(req);
+	}
 
 	if (!def->audit_skip)
 		audit_uring_entry(req->opcode);
@@ -1742,8 +1736,12 @@ static inline int __io_issue_sqe(struct io_kiocb *req,
 	if (!def->audit_skip)
 		audit_uring_exit(!ret, ret);
 
-	if (creds)
-		revert_creds(creds);
+	if (unlikely(creds || link)) {
+		if (creds)
+			revert_creds(creds);
+		if (link)
+			io_queue_linked_timeout(link);
+	}
 
 	return ret;
 }
@@ -1769,7 +1767,6 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
 		ret = 0;
-		io_arm_ltimeout(req);
 
 		/* If the op doesn't have a file, we're not polling for it */
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
@@ -1824,8 +1821,6 @@ void io_wq_submit_work(struct io_wq_work *work)
 	else
 		req_ref_get(req);
 
-	io_arm_ltimeout(req);
-
 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
 	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
 fail:
@@ -1941,15 +1936,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 static void io_queue_async(struct io_kiocb *req, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout;
-
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
 		io_req_defer_failed(req, ret);
 		return;
 	}
 
-	linked_timeout = io_prep_linked_timeout(req);
-
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
 		io_kbuf_recycle(req, 0);
@@ -1962,9 +1953,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 	case IO_APOLL_OK:
 		break;
 	}
-
-	if (linked_timeout)
-		io_queue_linked_timeout(linked_timeout);
 }
 
 static inline void io_queue_sqe(struct io_kiocb *req)
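
The io_req_post_cqe() hunk above addresses the second item in the merge message: when several multishot requests draw from the same provided-buffer group, a CQE posted directly must not overtake completions still parked on the deferred list, or userspace would see the completions out of order. The sketch below is a rough liburing illustration of that usage pattern, not taken from this merge; the socketpair descriptors, group id 7, and buffer sizes are arbitrary.

/* Sketch: two multishot receives sharing one provided-buffer group (bgid 7).
 * Illustrative only; socketpair fds, group id, and sizes are arbitrary.
 */
#include <liburing.h>
#include <stdlib.h>
#include <sys/socket.h>

#define BGID      7
#define NR_BUFS   8
#define BUF_SIZE  4096

int main(void)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	int sp1[2], sp2[2], ret, i;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sp1) ||
	    socketpair(AF_UNIX, SOCK_STREAM, 0, sp2) ||
	    io_uring_queue_init(8, &ring, 0))
		return 1;

	/* Register a ring of provided buffers under group id BGID. */
	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BGID, 0, &ret);
	if (!br)
		return 1;
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, malloc(BUF_SIZE), BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* Two multishot receives, both selecting buffers from group BGID.
	 * Each posts one CQE per received chunk, with IORING_CQE_F_MORE set
	 * while the request stays armed. */
	for (i = 0; i < 2; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
		int fd = i ? sp2[0] : sp1[0];

		io_uring_prep_recv_multishot(sqe, fd, NULL, 0, 0);
		sqe->flags |= IOSQE_BUFFER_SELECT;
		sqe->buf_group = BGID;
	}
	io_uring_submit(&ring);

	/* ... write to sp1[1]/sp2[1] and reap CQEs; the buffer consumed by
	 * each completion is reported in cqe->flags >> IORING_CQE_BUFFER_SHIFT. */

	io_uring_queue_exit(&ring);
	return 0;
}

Each CQE reports the buffer it consumed via cqe->flags, so the relative ordering of completions from requests sharing a group is visible to userspace, which is why the flush above is needed before posting directly.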

io_uring/sqpoll.c

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
 #include "sqpoll.h"
 
 #define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
-#define IORING_TW_CAP_ENTRIES_VALUE	8
+#define IORING_TW_CAP_ENTRIES_VALUE	32
 
 enum {
 	IO_SQ_THREAD_SHOULD_STOP = 0,
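
The batch-size bump above only matters for rings created in SQPOLL mode, where a dedicated kernel thread performs submission and now also processes its own task_work in batches capped by IORING_TW_CAP_ENTRIES_VALUE. For context, a minimal liburing sketch of setting up such a ring from userspace follows; it is illustrative only, and the 2000 ms idle value is an arbitrary choice.

/* Sketch: creating a ring in SQPOLL mode, the configuration affected by the
 * IORING_TW_CAP_ENTRIES_VALUE change above. Illustrative values only.
 */
#include <liburing.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;	/* kernel thread performs submission */
	p.sq_thread_idle = 2000;	/* thread idles after 2000 ms without work */

	if (io_uring_queue_init_params(64, &ring, &p) < 0)
		return 1;

	/* With SQPOLL, io_uring_submit() only makes SQEs visible to the
	 * polling thread (and wakes it if idle); no syscall per submission
	 * is needed while the thread is running. */

	io_uring_queue_exit(&ring);
	return 0;
}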
