Skip to content

Commit d86eaed

Browse files
committed
io_uring: cleanup io_aux_cqe() API
Everybody is passing in the request, so get rid of the io_ring_ctx and explicit user_data pass-in. Both the ctx and user_data can be deduced from the request at hand. Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent c92fcfc commit d86eaed

File tree

5 files changed

+12
-11
lines changed

5 files changed

+12
-11
lines changed

io_uring/io_uring.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -935,9 +935,11 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
935935
return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
936936
}
937937

938-
bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
938+
bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
939939
bool allow_overflow)
940940
{
941+
struct io_ring_ctx *ctx = req->ctx;
942+
u64 user_data = req->cqe.user_data;
941943
struct io_uring_cqe *cqe;
942944
unsigned int length;
943945

io_uring/io_uring.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx);
4747
void io_req_defer_failed(struct io_kiocb *req, s32 res);
4848
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
4949
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
50-
bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
50+
bool io_aux_cqe(const struct io_kiocb *req, bool defer, s32 res, u32 cflags,
5151
bool allow_overflow);
5252
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
5353

io_uring/net.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -632,8 +632,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
632632
}
633633

634634
if (!mshot_finished) {
635-
if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
636-
req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
635+
if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
636+
*ret, cflags | IORING_CQE_F_MORE, true)) {
637637
io_recv_prep_retry(req);
638638
/* Known not-empty or unknown state, retry */
639639
if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
@@ -1304,7 +1304,6 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
13041304

13051305
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
13061306
{
1307-
struct io_ring_ctx *ctx = req->ctx;
13081307
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
13091308
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
13101309
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
@@ -1354,8 +1353,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
13541353

13551354
if (ret < 0)
13561355
return ret;
1357-
if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
1358-
req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
1356+
if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
1357+
IORING_CQE_F_MORE, true))
13591358
goto retry;
13601359

13611360
return -ECANCELED;

io_uring/poll.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -300,8 +300,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
300300
__poll_t mask = mangle_poll(req->cqe.res &
301301
req->apoll_events);
302302

303-
if (!io_aux_cqe(req->ctx, ts->locked, req->cqe.user_data,
304-
mask, IORING_CQE_F_MORE, false)) {
303+
if (!io_aux_cqe(req, ts->locked, mask,
304+
IORING_CQE_F_MORE, false)) {
305305
io_req_set_res(req, mask, 0);
306306
return IOU_POLL_REMOVE_POLL_USE_RES;
307307
}

io_uring/timeout.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,8 +73,8 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
7373

7474
if (!io_timeout_finish(timeout, data)) {
7575
bool filled;
76-
filled = io_aux_cqe(ctx, ts->locked, req->cqe.user_data, -ETIME,
77-
IORING_CQE_F_MORE, false);
76+
filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE,
77+
false);
7878
if (filled) {
7979
/* re-arm timer */
8080
spin_lock_irq(&ctx->timeout_lock);

0 commit comments

Comments (0)