Commit e21e1c4

io_uring: clear opcode specific data for an early failure
If failure happens before the opcode prep handler is called, ensure that we clear the opcode specific area of the request, which holds data specific to that request type. This prevents errors where opcode handlers never get a chance to clear per-request private data, since prep isn't even called.

Reported-and-tested-by: syzbot+f8e9a371388aa62ecab4@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

1 parent f3a640c commit e21e1c4
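To make the failure mode concrete, here is a minimal user-space C sketch. The names (struct fake_req, fake_init_req, fake_init_fail_req, FAKE_EINVAL) are invented for illustration and are not the kernel's real io_uring types; the point is only how a recycled request object can carry stale per-opcode data into a failed submission when initialization bails out before any prep handler runs, and how clearing that area in the failure path avoids it.

/*
 * Minimal sketch of the failure mode (invented names, not kernel code).
 * Build: cc -Wall sketch.c && ./a.out
 */
#include <stdio.h>
#include <string.h>

#define FAKE_EINVAL 22

struct fake_req {
	int opcode;
	/* per-opcode private area, normally filled in by a prep handler */
	char data[64];
};

/* counterpart of io_init_fail_req(): clear per-opcode data, then bail */
static int fake_init_fail_req(struct fake_req *req, int err)
{
	memset(req->data, 0, sizeof(req->data));
	return err;
}

static int fake_init_req(struct fake_req *req, int opcode)
{
	req->opcode = opcode;
	if (opcode < 0)	/* invalid opcode: fail before any prep runs */
		return fake_init_fail_req(req, -FAKE_EINVAL);
	/* the "prep handler": initializes the per-opcode area */
	snprintf(req->data, sizeof(req->data), "prepped op %d", opcode);
	return 0;
}

int main(void)
{
	/* one request object recycled across submissions, like a req cache */
	struct fake_req req;

	fake_init_req(&req, 1);		/* leaves "prepped op 1" in req.data */
	fake_init_req(&req, -1);	/* early failure: without the memset,
					 * req.data would still hold stale
					 * data from the previous request */
	printf("data after failed init: \"%s\"\n", req.data);
	return 0;
}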

File tree: 1 file changed (+16, -9)

io_uring/io_uring.c: 16 additions, 9 deletions
@@ -2181,6 +2181,13 @@ static void io_init_req_drain(struct io_kiocb *req)
 	}
 }
 
+static __cold int io_init_fail_req(struct io_kiocb *req, int err)
+{
+	/* ensure per-opcode data is cleared if we fail before prep */
+	memset(&req->cmd.data, 0, sizeof(req->cmd.data));
+	return err;
+}
+
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		       const struct io_uring_sqe *sqe)
 	__must_hold(&ctx->uring_lock)
@@ -2202,29 +2209,29 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 	if (unlikely(opcode >= IORING_OP_LAST)) {
 		req->opcode = 0;
-		return -EINVAL;
+		return io_init_fail_req(req, -EINVAL);
 	}
 	def = &io_issue_defs[opcode];
 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
 		/* enforce forwards compatibility on users */
 		if (sqe_flags & ~SQE_VALID_FLAGS)
-			return -EINVAL;
+			return io_init_fail_req(req, -EINVAL);
 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
 			if (!def->buffer_select)
-				return -EOPNOTSUPP;
+				return io_init_fail_req(req, -EOPNOTSUPP);
 			req->buf_index = READ_ONCE(sqe->buf_group);
 		}
 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
 			ctx->drain_disabled = true;
 		if (sqe_flags & IOSQE_IO_DRAIN) {
 			if (ctx->drain_disabled)
-				return -EOPNOTSUPP;
+				return io_init_fail_req(req, -EOPNOTSUPP);
 			io_init_req_drain(req);
 		}
 	}
 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
-			return -EACCES;
+			return io_init_fail_req(req, -EACCES);
 		/* knock it to the slow queue path, will be drained there */
 		if (ctx->drain_active)
 			req->flags |= REQ_F_FORCE_ASYNC;
@@ -2237,9 +2244,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	}
 
 	if (!def->ioprio && sqe->ioprio)
-		return -EINVAL;
+		return io_init_fail_req(req, -EINVAL);
 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
+		return io_init_fail_req(req, -EINVAL);
 
 	if (def->needs_file) {
 		struct io_submit_state *state = &ctx->submit_state;
@@ -2263,12 +2270,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 		req->creds = xa_load(&ctx->personalities, personality);
 		if (!req->creds)
-			return -EINVAL;
+			return io_init_fail_req(req, -EINVAL);
 		get_cred(req->creds);
 		ret = security_uring_override_creds(req->creds);
 		if (ret) {
 			put_cred(req->creds);
-			return ret;
+			return io_init_fail_req(req, ret);
 		}
 		req->flags |= REQ_F_CREDS;
 	}
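A note on the shape of the fix: every early-failure return in io_init_req() is funneled through the new io_init_fail_req() helper, so the memset of the per-opcode area (req->cmd.data) lives in exactly one place rather than being duplicated at each return. The helper is annotated __cold, which tells the compiler these failure paths are rarely taken and may be optimized for size and moved out of the hot submission path.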
