@@ -1140,6 +1140,25 @@ static void ublk_complete_rq(struct kref *ref)
 	__ublk_complete_rq(req);
 }
 
+static void ublk_do_fail_rq(struct request *req)
+{
+	struct ublk_queue *ubq = req->mq_hctx->driver_data;
+
+	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+		blk_mq_requeue_request(req, false);
+	else
+		__ublk_complete_rq(req);
+}
+
+static void ublk_fail_rq_fn(struct kref *ref)
+{
+	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
+			ref);
+	struct request *req = blk_mq_rq_from_pdu(data);
+
+	ublk_do_fail_rq(req);
+}
+
 /*
  * Since ublk_rq_task_work_cb always fails requests immediately during
  * exiting, __ublk_fail_req() is only called from abort context during
@@ -1153,10 +1172,13 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
 {
 	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
 
-	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
-		blk_mq_requeue_request(req, false);
-	else
-		ublk_put_req_ref(ubq, req);
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		kref_put(&data->ref, ublk_fail_rq_fn);
+	} else {
+		ublk_do_fail_rq(req);
+	}
 }
 
 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
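
Aside, not part of the patch: ublk_fail_rq_fn() above is a kref release callback, and __ublk_fail_req() now drops the request reference with kref_put(), so the requeue-or-complete decision in ublk_do_fail_rq() runs exactly once, when the last reference goes away. A minimal sketch of that idiom, with hypothetical demo_* names rather than the driver's types:

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
	int payload;
};

/* Release callback: invoked exactly once, when the refcount drops to zero. */
static void demo_release(struct kref *ref)
{
	struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

	kfree(obj);
}

static void demo_put(struct demo_obj *obj)
{
	/* kref_put() only calls demo_release() if this was the last reference. */
	kref_put(&obj->ref, demo_release);
}
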
@@ -1349,7 +1371,8 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
 	return BLK_EH_RESET_TIMER;
 }
 
-static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
+				  bool check_cancel)
 {
 	blk_status_t res;
 
@@ -1368,7 +1391,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
 	if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
 		return BLK_STS_IOERR;
 
-	if (unlikely(ubq->canceling))
+	if (check_cancel && unlikely(ubq->canceling))
 		return BLK_STS_IOERR;
 
 	/* fill iod to slot in io cmd buffer */
@@ -1387,7 +1410,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct request *rq = bd->rq;
 	blk_status_t res;
 
-	res = ublk_prep_req(ubq, rq);
+	res = ublk_prep_req(ubq, rq, false);
 	if (res != BLK_STS_OK)
 		return res;
 
@@ -1419,7 +1442,7 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
 			ublk_queue_cmd_list(ubq, &submit_list);
 		ubq = this_q;
 
-		if (ublk_prep_req(ubq, req) == BLK_STS_OK)
+		if (ublk_prep_req(ubq, req, true) == BLK_STS_OK)
 			rq_list_add_tail(&submit_list, req);
 		else
 			rq_list_add_tail(&requeue_list, req);
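
Aside: as the hunks above show, the per-request ublk_queue_rq() path now passes check_cancel == false, while the batched ublk_queue_rqs() path passes true, so in the batch path a canceling queue makes ublk_prep_req() return BLK_STS_IOERR and the request lands on requeue_list. A hedged, stripped-down sketch of an opt-in check of this kind, with hypothetical demo_* names:

#include <linux/blk_types.h>
#include <linux/types.h>

struct demo_queue {
	bool canceling;
};

static blk_status_t demo_prep_req(struct demo_queue *q, bool check_cancel)
{
	/* Only callers that opt in treat cancellation as a prep-time error. */
	if (check_cancel && q->canceling)
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}
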
@@ -2413,9 +2436,9 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
 	return ub;
 }
 
-static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_dev(struct ublk_device *ub,
+		const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	const struct ublk_param_basic *p = &ub->params.basic;
 	int ublksrv_pid = (int)header->data[0];
 	struct queue_limits lim = {
@@ -2534,9 +2557,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
 }
 
 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
-		struct io_uring_cmd *cmd)
+		const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	void __user *argp = (void __user *)(unsigned long)header->addr;
 	cpumask_var_t cpumask;
 	unsigned long queue;
@@ -2585,9 +2607,8 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
 			info->nr_hw_queues, info->queue_depth);
 }
 
-static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	void __user *argp = (void __user *)(unsigned long)header->addr;
 	struct ublksrv_ctrl_dev_info info;
 	struct ublk_device *ub;
@@ -2812,9 +2833,8 @@ static int ublk_ctrl_stop_dev(struct ublk_device *ub)
 }
 
 static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
-		struct io_uring_cmd *cmd)
+		const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	void __user *argp = (void __user *)(unsigned long)header->addr;
 
 	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
@@ -2843,9 +2863,8 @@ static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
 }
 
 static int ublk_ctrl_get_params(struct ublk_device *ub,
-		struct io_uring_cmd *cmd)
+		const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	void __user *argp = (void __user *)(unsigned long)header->addr;
 	struct ublk_params_header ph;
 	int ret;
@@ -2874,9 +2893,8 @@ static int ublk_ctrl_get_params(struct ublk_device *ub,
 }
 
 static int ublk_ctrl_set_params(struct ublk_device *ub,
-		struct io_uring_cmd *cmd)
+		const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	void __user *argp = (void __user *)(unsigned long)header->addr;
 	struct ublk_params_header ph;
 	int ret = -EFAULT;
@@ -2940,9 +2958,8 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
 }
 
 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
-		struct io_uring_cmd *cmd)
+		const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	int ret = -EINVAL;
 	int i;
 
@@ -2988,9 +3005,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 }
 
 static int ublk_ctrl_end_recovery(struct ublk_device *ub,
-		struct io_uring_cmd *cmd)
+		const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	int ublksrv_pid = (int)header->data[0];
 	int ret = -EINVAL;
 	int i;
@@ -3037,9 +3053,8 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 	return ret;
 }
 
-static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
 {
-	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
 	void __user *argp = (void __user *)(unsigned long)header->addr;
 	u64 features = UBLK_F_ALL;
 
@@ -3178,7 +3193,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
 		goto out;
 
 	if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
-		ret = ublk_ctrl_get_features(cmd);
+		ret = ublk_ctrl_get_features(header);
 		goto out;
 	}
 
@@ -3195,17 +3210,17 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
 
 	switch (_IOC_NR(cmd_op)) {
 	case UBLK_CMD_START_DEV:
-		ret = ublk_ctrl_start_dev(ub, cmd);
+		ret = ublk_ctrl_start_dev(ub, header);
 		break;
 	case UBLK_CMD_STOP_DEV:
 		ret = ublk_ctrl_stop_dev(ub);
 		break;
 	case UBLK_CMD_GET_DEV_INFO:
 	case UBLK_CMD_GET_DEV_INFO2:
-		ret = ublk_ctrl_get_dev_info(ub, cmd);
+		ret = ublk_ctrl_get_dev_info(ub, header);
 		break;
 	case UBLK_CMD_ADD_DEV:
-		ret = ublk_ctrl_add_dev(cmd);
+		ret = ublk_ctrl_add_dev(header);
 		break;
 	case UBLK_CMD_DEL_DEV:
 		ret = ublk_ctrl_del_dev(&ub, true);
@@ -3214,19 +3229,19 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
 		ret = ublk_ctrl_del_dev(&ub, false);
 		break;
 	case UBLK_CMD_GET_QUEUE_AFFINITY:
-		ret = ublk_ctrl_get_queue_affinity(ub, cmd);
+		ret = ublk_ctrl_get_queue_affinity(ub, header);
 		break;
 	case UBLK_CMD_GET_PARAMS:
-		ret = ublk_ctrl_get_params(ub, cmd);
+		ret = ublk_ctrl_get_params(ub, header);
 		break;
 	case UBLK_CMD_SET_PARAMS:
-		ret = ublk_ctrl_set_params(ub, cmd);
+		ret = ublk_ctrl_set_params(ub, header);
 		break;
 	case UBLK_CMD_START_USER_RECOVERY:
-		ret = ublk_ctrl_start_recovery(ub, cmd);
+		ret = ublk_ctrl_start_recovery(ub, header);
 		break;
 	case UBLK_CMD_END_USER_RECOVERY:
-		ret = ublk_ctrl_end_recovery(ub, cmd);
+		ret = ublk_ctrl_end_recovery(ub, header);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
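
Aside: the control-path hunks all apply one refactor. Each ublk_ctrl_* handler used to fetch the SQE payload itself via io_uring_sqe_cmd(cmd->sqe); now ublk_ctrl_uring_cmd() decodes it once and hands the handlers a const struct ublksrv_ctrl_cmd *. A hypothetical, simplified sketch of the resulting shape (the demo_* names are not from the driver):

#include <linux/io_uring/cmd.h>
#include <uapi/linux/ublk_cmd.h>

/* Handlers see only the decoded, read-only control header. */
static int demo_handler(const struct ublksrv_ctrl_cmd *header)
{
	void __user *argp = (void __user *)(unsigned long)header->addr;

	/* ... use header->data[], header->len, argp ... */
	return argp ? 0 : -EINVAL;
}

/* The dispatcher decodes the SQE payload exactly once. */
static int demo_ctrl_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);

	return demo_handler(header);
}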