
Commit e63d222

Ming Lei authored and axboe committed
ublk: simplify aborting ublk request
Now that ublk_abort_queue() is moved to the ublk char device release handler, and the request queue is meanwhile "quiesced" (because either ->canceling was set from the uring_cmd cancel function, or all IOs are inflight and can't be completed by the ublk server), things become much easier:

- all uring_cmds are done, so we needn't mark the io as UBLK_IO_FLAG_ABORTED for handling completion from uring_cmd

- the ublk char device is closed, so no one can hold an IO request reference any more, and we can simply complete the request, or requeue it for ublk_nosrv_should_reissue_outstanding

Reviewed-by: Uday Shankar <ushankar@purestorage.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250416035444.99569-8-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
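For orientation, here is a minimal sketch of the call shape the message describes: the char device release handler walks every queue and aborts its started requests. The wrapper name ublk_abort_dev_sketch and the loop below are illustrative assumptions, not the patch's exact code; ublk_get_queue() and dev_info.nr_hw_queues follow the driver's existing names.

	/* Illustrative sketch only; not part of this patch. */
	static void ublk_abort_dev_sketch(struct ublk_device *ub)
	{
		int i;

		/*
		 * The char device is closed, so no uring_cmd can complete
		 * these IOs; abort every queue's started requests.
		 */
		for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
			ublk_abort_queue(ub, ublk_get_queue(ub, i));
	}

Each started request then either gets requeued (when outstanding IOs should be reissued) or completed with -EIO, as the relocated __ublk_fail_req() in the diff below shows.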
Parent: 736b005

1 file changed: +20 −62 lines

drivers/block/ublk_drv.c

Lines changed: 20 additions & 62 deletions
@@ -122,15 +122,6 @@ struct ublk_uring_cmd_pdu {
  */
 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
 
-/*
- * IO command is aborted, so this flag is set in case of
- * !UBLK_IO_FLAG_ACTIVE.
- *
- * After this flag is observed, any pending or new incoming request
- * associated with this io command will be failed immediately
- */
-#define UBLK_IO_FLAG_ABORTED 0x04
-
 /*
  * UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires
  * get data buffer address from ublksrv.
@@ -1083,12 +1074,6 @@ static inline void __ublk_complete_rq(struct request *req)
 	unsigned int unmapped_bytes;
 	blk_status_t res = BLK_STS_OK;
 
-	/* called from ublk_abort_queue() code path */
-	if (io->flags & UBLK_IO_FLAG_ABORTED) {
-		res = BLK_STS_IOERR;
-		goto exit;
-	}
-
 	/* failed read IO if nothing is read */
 	if (!io->res && req_op(req) == REQ_OP_READ)
 		io->res = -EIO;
@@ -1138,47 +1123,6 @@ static void ublk_complete_rq(struct kref *ref)
 	__ublk_complete_rq(req);
 }
 
-static void ublk_do_fail_rq(struct request *req)
-{
-	struct ublk_queue *ubq = req->mq_hctx->driver_data;
-
-	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
-		blk_mq_requeue_request(req, false);
-	else
-		__ublk_complete_rq(req);
-}
-
-static void ublk_fail_rq_fn(struct kref *ref)
-{
-	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
-			ref);
-	struct request *req = blk_mq_rq_from_pdu(data);
-
-	ublk_do_fail_rq(req);
-}
-
-/*
- * Since ublk_rq_task_work_cb always fails requests immediately during
- * exiting, __ublk_fail_req() is only called from abort context during
- * exiting. So lock is unnecessary.
- *
- * Also aborting may not be started yet, keep in mind that one failed
- * request may be issued by block layer again.
- */
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
-		struct request *req)
-{
-	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
-
-	if (ublk_need_req_ref(ubq)) {
-		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
-		kref_put(&data->ref, ublk_fail_rq_fn);
-	} else {
-		ublk_do_fail_rq(req);
-	}
-}
-
 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
 		unsigned issue_flags)
 {
@@ -1667,10 +1611,26 @@ static void ublk_commit_completion(struct ublk_device *ub,
 	ublk_put_req_ref(ubq, req);
 }
 
+static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+		struct request *req)
+{
+	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+
+	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+		blk_mq_requeue_request(req, false);
+	else {
+		io->res = -EIO;
+		__ublk_complete_rq(req);
+	}
+}
+
 /*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
- * context, so everything is serialized.
+ * Called from ublk char device release handler, when any uring_cmd is
+ * done, meantime request queue is "quiesced" since all inflight requests
+ * can't be completed because ublk server is dead.
+ *
+ * So no one can hold our request IO reference any more, simply ignore the
+ * reference, and complete the request immediately
  */
 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
 {
@@ -1687,10 +1647,8 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
 			 * will do it
 			 */
 			rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
-			if (rq && blk_mq_request_started(rq)) {
-				io->flags |= UBLK_IO_FLAG_ABORTED;
+			if (rq && blk_mq_request_started(rq))
 				__ublk_fail_req(ubq, io, rq);
-			}
 		}
 	}
 }
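The requeue-versus-complete decision above hinges on ublk_nosrv_should_reissue_outstanding(). As a rough sketch of what such a predicate checks, assuming it keys off the driver's existing user-recovery feature flags; the body below is an assumption, not quoted from the tree:

	/* Assumed sketch of the reissue predicate; body not quoted from the tree. */
	static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
	{
		/*
		 * Reissue outstanding requests only when the device was
		 * created with user recovery enabled and configured to
		 * reissue (UBLK_F_USER_RECOVERY and
		 * UBLK_F_USER_RECOVERY_REISSUE are existing UAPI flags).
		 */
		return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
		       (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
	}

Under that reading, a dead server with reissue configured gets its inflight requests requeued for a future server; otherwise they fail with -EIO.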
