Commit d6211eb

io_uring/uring_cmd: unconditionally copy SQEs at prep time
This isn't generally necessary, but conditions have been observed where SQE data is accessed from the original SQE after prep has been done and outside of the initial issue. Opcode prep handlers must ensure that any SQE-related data is stable beyond the prep phase, but uring_cmd is a bit special in how it handles the SQE, which makes it susceptible to reading stale data. If the application has reused the SQE before the original request completes, that can lead to data corruption.

Down the line we can relax this again, once uring_cmd has been sanitized a bit, and avoid unnecessarily copying the SQE.

Fixes: 5eff57f ("io_uring/uring_cmd: defer SQE copying until it's needed")
Reported-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Li Zetao <lizetao1@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 2b4fc4c commit d6211eb
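
To make the failure mode concrete, here is a minimal userspace sketch using liburing. The function name, the fd, and the cmd_op values are hypothetical, and it assumes a ring small or busy enough that io_uring_get_sqe() soon hands back the same SQ slot; the point is only that the application may legally rewrite SQ ring memory while the first request is still in flight, so the kernel cannot safely re-read the original SQE after prep.

/*
 * Hypothetical sketch of the reuse hazard described in the commit
 * message. Assumes a set-up ring and an fd whose driver implements
 * ->uring_cmd(); the cmd_op values are made up.
 */
#include <liburing.h>

static void sqe_reuse_hazard(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fd, NULL, 0, 0);
	sqe->cmd_op = 0x1;		/* hypothetical driver opcode */
	io_uring_submit(ring);		/* SQ slot is now consumed */

	/*
	 * The first command may still be executing, but its SQ slot has
	 * been handed back to the ring. io_uring_get_sqe() can return
	 * the same slot, and filling it in overwrites the memory the
	 * kernel saw at prep time. If the kernel re-reads the original
	 * SQE now instead of a cached copy, it sees this second
	 * command's data.
	 */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fd, NULL, 0, 0);
	sqe->cmd_op = 0x2;		/* clobbers the reused slot */
	io_uring_submit(ring);
}

Copying the SQE unconditionally at prep time, as this commit does, makes the kernel's view of the command immune to that rewrite.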

File tree

1 file changed: 11 additions, 23 deletions

io_uring/uring_cmd.c

Lines changed: 11 additions & 23 deletions
@@ -165,15 +165,6 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
-static void io_uring_cmd_cache_sqes(struct io_kiocb *req)
-{
-	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
-	struct io_uring_cmd_data *cache = req->async_data;
-
-	memcpy(cache->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
-	ioucmd->sqe = cache->sqes;
-}
-
 static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 				   const struct io_uring_sqe *sqe)
 {
@@ -185,10 +176,15 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 		return -ENOMEM;
 	cache->op_data = NULL;
 
-	ioucmd->sqe = sqe;
-	/* defer memcpy until we need it */
-	if (unlikely(req->flags & REQ_F_FORCE_ASYNC))
-		io_uring_cmd_cache_sqes(req);
+	/*
+	 * Unconditionally cache the SQE for now - this is only needed for
+	 * requests that go async, but prep handlers must ensure that any
+	 * sqe data is stable beyond prep. Since uring_cmd is special in
+	 * that it doesn't read in per-op data, play it safe and ensure that
+	 * any SQE data is stable beyond prep. This can later get relaxed.
+	 */
+	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+	ioucmd->sqe = cache->sqes;
 	return 0;
 }
 
@@ -251,16 +247,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
-	if (ret == -EAGAIN) {
-		struct io_uring_cmd_data *cache = req->async_data;
-
-		if (ioucmd->sqe != cache->sqes)
-			io_uring_cmd_cache_sqes(req);
-		return -EAGAIN;
-	} else if (ret == -EIOCBQUEUED) {
-		return -EIOCBQUEUED;
-	}
-
+	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
+		return ret;
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_uring_cleanup(req, issue_flags);
