Skip to content

Commit b69b8ed

Browse files
ps-ushankar authored and axboe committed
ublk: properly serialize all FETCH_REQs
Most uring_cmds issued against ublk character devices are serialized because each command affects only one queue, and there is an early check which only allows a single task (the queue's ubq_daemon) to issue uring_cmds against that queue. However, this mechanism does not work for FETCH_REQs, since they are expected before ubq_daemon is set. Since FETCH_REQs are only used at initialization and not in the fast path, serialize them using the per-ublk-device mutex. This fixes a number of data races that were previously possible if a badly behaved ublk server decided to issue multiple FETCH_REQs against the same qid/tag concurrently. Reported-by: Caleb Sander Mateos <csander@purestorage.com> Signed-off-by: Uday Shankar <ushankar@purestorage.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20250416035444.99569-2-ming.lei@redhat.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 3bf5406 commit b69b8ed

File tree

1 file changed

+49
-28
lines changed

1 file changed

+49
-28
lines changed

drivers/block/ublk_drv.c

Lines changed: 49 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1832,8 +1832,8 @@ static void ublk_nosrv_work(struct work_struct *work)
18321832

18331833
/* device can only be started after all IOs are ready */
18341834
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1835+
__must_hold(&ub->mutex)
18351836
{
1836-
mutex_lock(&ub->mutex);
18371837
ubq->nr_io_ready++;
18381838
if (ublk_queue_ready(ubq)) {
18391839
ubq->ubq_daemon = current;
@@ -1845,7 +1845,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
18451845
}
18461846
if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
18471847
complete_all(&ub->completion);
1848-
mutex_unlock(&ub->mutex);
18491848
}
18501849

18511850
static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
@@ -1929,6 +1928,52 @@ static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
19291928
return io_buffer_unregister_bvec(cmd, index, issue_flags);
19301929
}
19311930

1931+
static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
1932+
struct ublk_io *io, __u64 buf_addr)
1933+
{
1934+
struct ublk_device *ub = ubq->dev;
1935+
int ret = 0;
1936+
1937+
/*
1938+
* When handling FETCH command for setting up ublk uring queue,
1939+
* ub->mutex is the innermost lock, and we won't block for handling
1940+
* FETCH, so it is fine even for IO_URING_F_NONBLOCK.
1941+
*/
1942+
mutex_lock(&ub->mutex);
1943+
/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
1944+
if (ublk_queue_ready(ubq)) {
1945+
ret = -EBUSY;
1946+
goto out;
1947+
}
1948+
1949+
/* allow each command to be FETCHed at most once */
1950+
if (io->flags & UBLK_IO_FLAG_ACTIVE) {
1951+
ret = -EINVAL;
1952+
goto out;
1953+
}
1954+
1955+
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
1956+
1957+
if (ublk_need_map_io(ubq)) {
1958+
/*
1959+
* FETCH_RQ has to provide IO buffer if NEED GET
1960+
* DATA is not enabled
1961+
*/
1962+
if (!buf_addr && !ublk_need_get_data(ubq))
1963+
goto out;
1964+
} else if (buf_addr) {
1965+
/* User copy requires addr to be unset */
1966+
ret = -EINVAL;
1967+
goto out;
1968+
}
1969+
1970+
ublk_fill_io_cmd(io, cmd, buf_addr);
1971+
ublk_mark_io_ready(ub, ubq);
1972+
out:
1973+
mutex_unlock(&ub->mutex);
1974+
return ret;
1975+
}
1976+
19321977
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
19331978
unsigned int issue_flags,
19341979
const struct ublksrv_io_cmd *ub_cmd)
@@ -1985,33 +2030,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
19852030
case UBLK_IO_UNREGISTER_IO_BUF:
19862031
return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
19872032
case UBLK_IO_FETCH_REQ:
1988-
/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
1989-
if (ublk_queue_ready(ubq)) {
1990-
ret = -EBUSY;
1991-
goto out;
1992-
}
1993-
/*
1994-
* The io is being handled by server, so COMMIT_RQ is expected
1995-
* instead of FETCH_REQ
1996-
*/
1997-
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1998-
goto out;
1999-
2000-
if (ublk_need_map_io(ubq)) {
2001-
/*
2002-
* FETCH_RQ has to provide IO buffer if NEED GET
2003-
* DATA is not enabled
2004-
*/
2005-
if (!ub_cmd->addr && !ublk_need_get_data(ubq))
2006-
goto out;
2007-
} else if (ub_cmd->addr) {
2008-
/* User copy requires addr to be unset */
2009-
ret = -EINVAL;
2033+
ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
2034+
if (ret)
20102035
goto out;
2011-
}
2012-
2013-
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
2014-
ublk_mark_io_ready(ub, ubq);
20152036
break;
20162037
case UBLK_IO_COMMIT_AND_FETCH_REQ:
20172038
req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);

0 commit comments

Comments
 (0)