
Commit 728cbac

Ming Lei authored and axboe committed
ublk: move device reset into ublk_ch_release()
ublk_ch_release() is called after the ublk char device is closed, when all uring_cmds are done, so it is perfectly fine to move the ublk device reset from ublk_ctrl_start_recovery() into ublk_ch_release(). This avoids holding the exiting daemon's task_struct for too long.

However, resetting the following ublk IO flags has to be deferred until the ublk io_uring queues are ready:

- ubq->canceling: for requeuing IO in case of ublk_nosrv_dev_should_queue_io() before the device is recovered

- ubq->fail_io: for failing IO in case of UBLK_F_USER_RECOVERY_FAIL_IO before the device is recovered

- ublk_io->flags: for preventing use of io->cmd

This approach simplifies recovery a lot.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250416035444.99569-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
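Editor's note: a minimal userspace sketch of the two-step flag handling described above may help before reading the diff. This is not the driver code: the mock_* names, the queue depth of 4, and the MOCK_IO_FLAG_* values are made-up stand-ins for the real structures and UBLK_IO_FLAG_* constants in drivers/block/ublk_drv.c; only the bitmask logic mirrors ublk_queue_reinit() (keep CANCELED on char-device release) and ublk_reset_io_flags() (clear it once all queues are ready again).

/* Illustrative only: types, names and flag values are mocks, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_IO_FLAG_ACTIVE	(1U << 0)
#define MOCK_IO_FLAG_CANCELED	(1U << 3)
#define MOCK_Q_DEPTH		4

struct mock_io { unsigned int flags; };

struct mock_queue {
	bool canceling;
	bool fail_io;
	struct mock_io ios[MOCK_Q_DEPTH];
};

/*
 * Mirrors the per-io part of ublk_queue_reinit(): run from the char-device
 * release path, it forgets everything except CANCELED, which keeps the
 * incoming daemon from touching a stale io->cmd.
 */
static void mock_queue_reinit(struct mock_queue *q)
{
	for (int i = 0; i < MOCK_Q_DEPTH; i++)
		q->ios[i].flags &= MOCK_IO_FLAG_CANCELED;
}

/*
 * Mirrors ublk_reset_io_flags(): run once every queue has a new daemon,
 * it finally clears CANCELED, canceling and fail_io.
 */
static void mock_reset_io_flags(struct mock_queue *q)
{
	for (int j = 0; j < MOCK_Q_DEPTH; j++)
		q->ios[j].flags &= ~MOCK_IO_FLAG_CANCELED;
	q->canceling = false;
	q->fail_io = false;
}

int main(void)
{
	struct mock_queue q = {
		.canceling = true,
		.ios = { { MOCK_IO_FLAG_ACTIVE | MOCK_IO_FLAG_CANCELED } },
	};

	mock_queue_reinit(&q);		/* ~ ublk_ch_release() path */
	printf("after reinit: io[0].flags=%#x\n", q.ios[0].flags);

	mock_reset_io_flags(&q);	/* ~ all queues ready again */
	printf("after reset:  io[0].flags=%#x canceling=%d\n",
	       q.ios[0].flags, q.canceling);
	return 0;
}

The ordering is the key design point in the patch: the reinit step now runs in ublk_ch_release(), when no uring_cmd can still reference io->cmd, and the clearing step runs from ublk_mark_io_ready() only after the last queue becomes ready, immediately before complete_all(&ub->completion).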
1 parent 7e26cb6 commit 728cbac

File tree

1 file changed: +72, -49 lines changed


drivers/block/ublk_drv.c

Lines changed: 72 additions & 49 deletions
@@ -1074,7 +1074,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
 
 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 {
-	return ubq->ubq_daemon->flags & PF_EXITING;
+	return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
 }
 
 /* todo: handle partial completion */
@@ -1470,6 +1470,37 @@ static const struct blk_mq_ops ublk_mq_ops = {
 	.timeout	= ublk_timeout,
 };
 
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+	int i;
+
+	/* All old ioucmds have to be completed */
+	ubq->nr_io_ready = 0;
+
+	/*
+	 * old daemon is PF_EXITING, put it now
+	 *
+	 * It could be NULL in case of closing one quisced device.
+	 */
+	if (ubq->ubq_daemon)
+		put_task_struct(ubq->ubq_daemon);
+	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+	ubq->ubq_daemon = NULL;
+	ubq->timeout = false;
+
+	for (i = 0; i < ubq->q_depth; i++) {
+		struct ublk_io *io = &ubq->ios[i];
+
+		/*
+		 * UBLK_IO_FLAG_CANCELED is kept for avoiding to touch
+		 * io->cmd
+		 */
+		io->flags &= UBLK_IO_FLAG_CANCELED;
+		io->cmd = NULL;
+		io->addr = 0;
+	}
+}
+
 static int ublk_ch_open(struct inode *inode, struct file *filp)
 {
 	struct ublk_device *ub = container_of(inode->i_cdev,
@@ -1481,10 +1512,26 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static void ublk_reset_ch_dev(struct ublk_device *ub)
+{
+	int i;
+
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+
+	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+	ub->mm = NULL;
+	ub->nr_queues_ready = 0;
+	ub->nr_privileged_daemon = 0;
+}
+
 static int ublk_ch_release(struct inode *inode, struct file *filp)
 {
 	struct ublk_device *ub = filp->private_data;
 
+	/* all uring_cmd has been done now, reset device & ubq */
+	ublk_reset_ch_dev(ub);
+
 	clear_bit(UB_STATE_OPEN, &ub->state);
 	return 0;
 }
@@ -1831,6 +1878,24 @@ static void ublk_nosrv_work(struct work_struct *work)
 	ublk_cancel_dev(ub);
 }
 
+/* reset ublk io_uring queue & io flags */
+static void ublk_reset_io_flags(struct ublk_device *ub)
+{
+	int i, j;
+
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+		struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+		/* UBLK_IO_FLAG_CANCELED can be cleared now */
+		spin_lock(&ubq->cancel_lock);
+		for (j = 0; j < ubq->q_depth; j++)
+			ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+		spin_unlock(&ubq->cancel_lock);
+		ubq->canceling = false;
+		ubq->fail_io = false;
+	}
+}
+
 /* device can only be started after all IOs are ready */
 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 	__must_hold(&ub->mutex)
@@ -1844,8 +1909,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 		if (capable(CAP_SYS_ADMIN))
 			ub->nr_privileged_daemon++;
 	}
-	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
+
+	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+		/* now we are ready for handling ublk io request */
+		ublk_reset_io_flags(ub);
 		complete_all(&ub->completion);
+	}
 }
 
 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
@@ -2954,41 +3023,14 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
 	return ret;
 }
 
-static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
-{
-	int i;
-
-	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
-	/* All old ioucmds have to be completed */
-	ubq->nr_io_ready = 0;
-	/* old daemon is PF_EXITING, put it now */
-	put_task_struct(ubq->ubq_daemon);
-	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
-	ubq->ubq_daemon = NULL;
-	ubq->timeout = false;
-
-	for (i = 0; i < ubq->q_depth; i++) {
-		struct ublk_io *io = &ubq->ios[i];
-
-		/* forget everything now and be ready for new FETCH_REQ */
-		io->flags = 0;
-		io->cmd = NULL;
-		io->addr = 0;
-	}
-}
-
 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 		const struct ublksrv_ctrl_cmd *header)
 {
 	int ret = -EINVAL;
-	int i;
 
 	mutex_lock(&ub->mutex);
 	if (ublk_nosrv_should_stop_dev(ub))
 		goto out_unlock;
-	if (!ub->nr_queues_ready)
-		goto out_unlock;
 	/*
 	 * START_RECOVERY is only allowd after:
 	 *
@@ -3012,12 +3054,6 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 		goto out_unlock;
 	}
 	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
-	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
-		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
-	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
-	ub->mm = NULL;
-	ub->nr_queues_ready = 0;
-	ub->nr_privileged_daemon = 0;
 	init_completion(&ub->completion);
 	ret = 0;
 out_unlock:
@@ -3030,7 +3066,6 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 {
 	int ublksrv_pid = (int)header->data[0];
 	int ret = -EINVAL;
-	int i;
 
 	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -3050,22 +3085,10 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 		goto out_unlock;
 	}
 	ub->dev_info.ublksrv_pid = ublksrv_pid;
+	ub->dev_info.state = UBLK_S_DEV_LIVE;
 	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
 			__func__, ublksrv_pid, header->dev_id);
-
-	blk_mq_quiesce_queue(ub->ub_disk->queue);
-	ub->dev_info.state = UBLK_S_DEV_LIVE;
-	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
-		struct ublk_queue *ubq = ublk_get_queue(ub, i);
-
-		ubq->canceling = false;
-		ubq->fail_io = false;
-	}
-	blk_mq_unquiesce_queue(ub->ub_disk->queue);
-	pr_devel("%s: queue unquiesced, dev id %d.\n",
-			__func__, header->dev_id);
 	blk_mq_kick_requeue_list(ub->ub_disk->queue);
-
 	ret = 0;
 out_unlock:
 	mutex_unlock(&ub->mutex);
