@@ -1074,7 +1074,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
 
 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 {
-	return ubq->ubq_daemon->flags & PF_EXITING;
+	return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
 }
 
 /* todo: handle partial completion */
@@ -1470,6 +1470,37 @@ static const struct blk_mq_ops ublk_mq_ops = {
 	.timeout	= ublk_timeout,
 };
 
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+	int i;
+
+	/* All old ioucmds have to be completed */
+	ubq->nr_io_ready = 0;
+
+	/*
+	 * old daemon is PF_EXITING, put it now
+	 *
+	 * It could be NULL in case of closing one quiesced device.
+	 */
+	if (ubq->ubq_daemon)
+		put_task_struct(ubq->ubq_daemon);
+	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+	ubq->ubq_daemon = NULL;
+	ubq->timeout = false;
+
+	for (i = 0; i < ubq->q_depth; i++) {
+		struct ublk_io *io = &ubq->ios[i];
+
+		/*
+		 * UBLK_IO_FLAG_CANCELED is kept to avoid touching
+		 * io->cmd
+		 */
+		io->flags &= UBLK_IO_FLAG_CANCELED;
+		io->cmd = NULL;
+		io->addr = 0;
+	}
+}
+
 static int ublk_ch_open(struct inode *inode, struct file *filp)
 {
 	struct ublk_device *ub = container_of(inode->i_cdev,
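
The one behavioral difference from the old reinit helper (removed further down) is io->flags &= UBLK_IO_FLAG_CANCELED;: instead of clearing every flag with io->flags = 0;, only the CANCELED bit may survive, so later code never touches io->cmd of an already-canceled command. A minimal stand-alone sketch of that mask-and-keep idiom (the flag values are invented for illustration and are not the driver's real definitions):

#include <stdio.h>

/* hypothetical flag values, only for this sketch */
#define IO_FLAG_ACTIVE		0x01
#define IO_FLAG_OWNED_BY_SRV	0x02
#define IO_FLAG_CANCELED	0x04	/* stands in for UBLK_IO_FLAG_CANCELED */

int main(void)
{
	unsigned int flags = IO_FLAG_ACTIVE | IO_FLAG_OWNED_BY_SRV | IO_FLAG_CANCELED;

	/* same idiom as io->flags &= UBLK_IO_FLAG_CANCELED: keep only that bit */
	flags &= IO_FLAG_CANCELED;

	printf("flags after reinit: 0x%x\n", flags);	/* prints 0x4 */
	return 0;
}
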
@@ -1481,10 +1512,26 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static void ublk_reset_ch_dev(struct ublk_device *ub)
+{
+	int i;
+
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+
+	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+	ub->mm = NULL;
+	ub->nr_queues_ready = 0;
+	ub->nr_privileged_daemon = 0;
+}
+
 static int ublk_ch_release(struct inode *inode, struct file *filp)
 {
 	struct ublk_device *ub = filp->private_data;
 
+	/* all uring_cmds have been done now, reset device & ubq */
+	ublk_reset_ch_dev(ub);
+
 	clear_bit(UB_STATE_OPEN, &ub->state);
 	return 0;
 }
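
With this hunk the heavy reset moves out of the START_USER_RECOVERY control path and into ublk_ch_release(), i.e. it runs once the old daemon's /dev/ublkcN file is closed and every uring_cmd on it has completed, which is also why the WARN_ON_ONCE() from the old control-path helper does not reappear. A simplified user-space model of that release-time reset, written only to illustrate the ordering (struct and field names are stand-ins, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* toy stand-ins for struct ublk_queue / struct ublk_device */
struct model_queue {
	int nr_io_ready;
	void *daemon;			/* plays the role of ubq->ubq_daemon */
};

struct model_dev {
	int nr_hw_queues;
	struct model_queue q[2];
	void *mm;
	int nr_queues_ready;
	int nr_privileged_daemon;
};

static void model_queue_reinit(struct model_queue *ubq)
{
	ubq->nr_io_ready = 0;
	ubq->daemon = NULL;		/* NULL is what lets a new FETCH_REQ in */
}

/* mirrors ublk_reset_ch_dev(): per-queue reinit plus device-level counters */
static void model_reset_ch_dev(struct model_dev *ub)
{
	int i;

	for (i = 0; i < ub->nr_hw_queues; i++)
		model_queue_reinit(&ub->q[i]);
	ub->mm = NULL;			/* so a new daemon can mmap the io_cmd_buf */
	ub->nr_queues_ready = 0;
	ub->nr_privileged_daemon = 0;
}

int main(void)
{
	struct model_dev ub = { .nr_hw_queues = 2, .nr_queues_ready = 2 };

	/* "release" time: every uring_cmd is done, so the reset is safe here */
	model_reset_ch_dev(&ub);
	printf("queues ready after release: %d\n", ub.nr_queues_ready);
	return 0;
}
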
@@ -1831,6 +1878,24 @@ static void ublk_nosrv_work(struct work_struct *work)
 	ublk_cancel_dev(ub);
 }
 
+/* reset ublk io_uring queue & io flags */
+static void ublk_reset_io_flags(struct ublk_device *ub)
+{
+	int i, j;
+
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+		struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+		/* UBLK_IO_FLAG_CANCELED can be cleared now */
+		spin_lock(&ubq->cancel_lock);
+		for (j = 0; j < ubq->q_depth; j++)
+			ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+		spin_unlock(&ubq->cancel_lock);
+		ubq->canceling = false;
+		ubq->fail_io = false;
+	}
+}
+
 /* device can only be started after all IOs are ready */
 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 	__must_hold(&ub->mutex)
@@ -1844,8 +1909,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 		if (capable(CAP_SYS_ADMIN))
 			ub->nr_privileged_daemon++;
 	}
-	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
+
+	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+		/* now we are ready for handling ublk io request */
+		ublk_reset_io_flags(ub);
 		complete_all(&ub->completion);
+	}
 }
 
 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
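
After this change the canceling/fail_io flags and UBLK_IO_FLAG_CANCELED are cleared in exactly one place: when the last queue reports ready in ublk_mark_io_ready(), right before complete_all(). The same flags used to be cleared under blk_mq_quiesce_queue() in ublk_ctrl_end_recovery(); the later hunks remove that path. A plain user-space sketch of the new gating (no kernel locking, illustrative names only):

#include <stdbool.h>
#include <stdio.h>

#define NR_HW_QUEUES 2

struct model_queue {
	bool canceling;
	bool fail_io;
};

static struct model_queue queues[NR_HW_QUEUES] = {
	{ true, true }, { true, true },
};
static int nr_queues_ready;

/* plays the role of ublk_reset_io_flags() */
static void model_reset_io_flags(void)
{
	int i;

	for (i = 0; i < NR_HW_QUEUES; i++) {
		queues[i].canceling = false;
		queues[i].fail_io = false;
	}
}

/* plays the role of the tail of ublk_mark_io_ready() */
static void model_mark_queue_ready(void)
{
	if (++nr_queues_ready == NR_HW_QUEUES) {
		model_reset_io_flags();	/* only now is the device ready for io */
		printf("all queues ready, cancel/fail flags cleared\n");
	}
}

int main(void)
{
	int i;

	for (i = 0; i < NR_HW_QUEUES; i++)
		model_mark_queue_ready();
	return 0;
}
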
@@ -2954,41 +3023,14 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
 	return ret;
 }
 
-static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
-{
-	int i;
-
-	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
-	/* All old ioucmds have to be completed */
-	ubq->nr_io_ready = 0;
-	/* old daemon is PF_EXITING, put it now */
-	put_task_struct(ubq->ubq_daemon);
-	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
-	ubq->ubq_daemon = NULL;
-	ubq->timeout = false;
-
-	for (i = 0; i < ubq->q_depth; i++) {
-		struct ublk_io *io = &ubq->ios[i];
-
-		/* forget everything now and be ready for new FETCH_REQ */
-		io->flags = 0;
-		io->cmd = NULL;
-		io->addr = 0;
-	}
-}
-
 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 		const struct ublksrv_ctrl_cmd *header)
 {
 	int ret = -EINVAL;
-	int i;
 
 	mutex_lock(&ub->mutex);
 	if (ublk_nosrv_should_stop_dev(ub))
 		goto out_unlock;
-	if (!ub->nr_queues_ready)
-		goto out_unlock;
 	/*
 	 * START_RECOVERY is only allowd after:
 	 *
@@ -3012,12 +3054,6 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 		goto out_unlock;
 	}
 	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
-	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
-		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
-	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
-	ub->mm = NULL;
-	ub->nr_queues_ready = 0;
-	ub->nr_privileged_daemon = 0;
 	init_completion(&ub->completion);
 	ret = 0;
 out_unlock:
@@ -3030,7 +3066,6 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 {
 	int ublksrv_pid = (int)header->data[0];
 	int ret = -EINVAL;
-	int i;
 
 	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -3050,22 +3085,10 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 		goto out_unlock;
 	}
 	ub->dev_info.ublksrv_pid = ublksrv_pid;
+	ub->dev_info.state = UBLK_S_DEV_LIVE;
 	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
 			__func__, ublksrv_pid, header->dev_id);
-
-	blk_mq_quiesce_queue(ub->ub_disk->queue);
-	ub->dev_info.state = UBLK_S_DEV_LIVE;
-	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
-		struct ublk_queue *ubq = ublk_get_queue(ub, i);
-
-		ubq->canceling = false;
-		ubq->fail_io = false;
-	}
-	blk_mq_unquiesce_queue(ub->ub_disk->queue);
-	pr_devel("%s: queue unquiesced, dev id %d.\n",
-		 __func__, header->dev_id);
 	blk_mq_kick_requeue_list(ub->ub_disk->queue);
-
 	ret = 0;
 out_unlock:
 	mutex_unlock(&ub->mutex);