@@ -338,6 +338,30 @@ static void nvme_log_error(struct request *req)
 			   nr->status & NVME_SC_DNR ? "DNR " : "");
 }
 
+static void nvme_log_err_passthru(struct request *req)
+{
+	struct nvme_ns *ns = req->q->queuedata;
+	struct nvme_request *nr = nvme_req(req);
+
+	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
+		"cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
+		ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
+		ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
+			nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+		nr->cmd->common.opcode,
+		nvme_get_error_status_str(nr->status),
+		nr->status >> 8 & 7,	/* Status Code Type */
+		nr->status & 0xff,	/* Status Code */
+		nr->status & NVME_SC_MORE ? "MORE " : "",
+		nr->status & NVME_SC_DNR ? "DNR " : "",
+		nr->cmd->common.cdw10,
+		nr->cmd->common.cdw11,
+		nr->cmd->common.cdw12,
+		nr->cmd->common.cdw13,
+		nr->cmd->common.cdw14,
+		nr->cmd->common.cdw15);
+}
+
 enum nvme_disposition {
 	COMPLETE,
 	RETRY,
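Note on the status arithmetic above: nr->status holds the NVMe completion status field shifted right by one (the phase bit dropped), so bits 10:8 carry the Status Code Type and bits 7:0 the Status Code, with MORE (0x2000) and DNR (0x4000) sitting above them. A minimal standalone sketch of the same decoding; the sample value is hypothetical:

/* Standalone illustration of the sct/sc decoding used by the logger. */
#include <stdio.h>

int main(void)
{
	unsigned int status = 0x4182;	/* hypothetical: DNR, sct 0x1, sc 0x82 */

	printf("sct 0x%x / sc 0x%x %s%s\n",
	       status >> 8 & 7,			/* Status Code Type */
	       status & 0xff,			/* Status Code */
	       status & 0x2000 ? "MORE " : "",	/* NVME_SC_MORE */
	       status & 0x4000 ? "DNR " : "");	/* NVME_SC_DNR */
	return 0;
}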
@@ -385,8 +409,12 @@ static inline void nvme_end_req(struct request *req)
 {
 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
-	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
-		nvme_log_error(req);
+	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+		if (blk_rq_is_passthrough(req))
+			nvme_log_err_passthru(req);
+		else
+			nvme_log_error(req);
+	}
 	nvme_end_req_zoned(req);
 	nvme_trace_bio_complete(req);
 	if (req->cmd_flags & REQ_NVME_MPATH)
@@ -679,10 +707,21 @@ static inline void nvme_clear_nvme_request(struct request *req)
 /* initialize a passthrough request */
 void nvme_init_request(struct request *req, struct nvme_command *cmd)
 {
-	if (req->q->queuedata)
+	struct nvme_request *nr = nvme_req(req);
+	bool logging_enabled;
+
+	if (req->q->queuedata) {
+		struct nvme_ns *ns = req->q->disk->private_data;
+
+		logging_enabled = ns->passthru_err_log_enabled;
 		req->timeout = NVME_IO_TIMEOUT;
-	else /* no queuedata implies admin queue */
+	} else { /* no queuedata implies admin queue */
+		logging_enabled = nr->ctrl->passthru_err_log_enabled;
 		req->timeout = NVME_ADMIN_TIMEOUT;
+	}
+
+	if (!logging_enabled)
+		req->rq_flags |= RQF_QUIET;
 
 	/* passthru commands should let the driver set the SGL flags */
 	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
@@ -691,8 +730,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
 		req->cmd_flags |= REQ_POLLED;
 	nvme_clear_nvme_request(req);
-	req->rq_flags |= RQF_QUIET;
-	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
+	memcpy(nr->cmd, cmd, sizeof(*cmd));
 }
 EXPORT_SYMBOL_GPL(nvme_init_request);
 
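For reference, nvme_init_request() now caches nvme_req(req) in nr; nvme_req() is the per-request PDU accessor from drivers/nvme/host/nvme.h, so the cached pointer is just the blk-mq payload address:

/* PDU accessor from drivers/nvme/host/nvme.h: the nvme_request is
 * allocated by blk-mq alongside each struct request. */
static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}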
@@ -721,7 +759,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
 
 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
-		bool queue_live)
+		bool queue_live, enum nvme_ctrl_state state)
 {
 	struct nvme_request *req = nvme_req(rq);
 
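The added state parameter lets callers sample the controller state once and pass it down, rather than having __nvme_check_ready() re-read it on every submission. A hypothetical caller pattern; the wrapper name and fast path are illustrative, not part of this diff:

/* Hypothetical caller sketch: sample the state once, fast-path LIVE,
 * and hand the sampled value to the slow-path check. */
static bool example_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
				bool queue_live)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	if (likely(state == NVME_CTRL_LIVE))
		return true;

	return __nvme_check_ready(ctrl, rq, queue_live, state);
}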
@@ -742,7 +780,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 	 * command, which is required to set the queue live in the
 	 * appropriate states.
 	 */
-	switch (nvme_ctrl_state(ctrl)) {
+	switch (state) {
 	case NVME_CTRL_CONNECTING:
 		if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
 		    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -1051,28 +1089,35 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		union nvme_result *result, void *buffer, unsigned bufflen,
-		int qid, int at_head, blk_mq_req_flags_t flags)
+		int qid, nvme_submit_flags_t flags)
 {
 	struct request *req;
 	int ret;
+	blk_mq_req_flags_t blk_flags = 0;
 
+	if (flags & NVME_SUBMIT_NOWAIT)
+		blk_flags |= BLK_MQ_REQ_NOWAIT;
+	if (flags & NVME_SUBMIT_RESERVED)
+		blk_flags |= BLK_MQ_REQ_RESERVED;
 	if (qid == NVME_QID_ANY)
-		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+		req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
 	else
-		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
 						qid - 1);
 
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 	nvme_init_request(req, cmd);
+	if (flags & NVME_SUBMIT_RETRY)
+		req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
 	if (buffer && bufflen) {
 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
 		if (ret)
 			goto out;
 	}
 
-	ret = nvme_execute_rq(req, at_head);
+	ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
 	if (result && ret >= 0)
 		*result = nvme_req(req)->result;
 out:
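The nvme_submit_flags_t type and the NVME_SUBMIT_* flags consumed above are declared outside this diff (presumably in nvme.h). A sketch consistent with the call sites here; only the names appear in this diff, and the bit values are an assumption:

/* Assumed flag declarations; bit assignments are illustrative. */
typedef unsigned int nvme_submit_flags_t;

enum {
	/* Insert the request at the head of the queue */
	NVME_SUBMIT_AT_HEAD	= (1 << 0),
	/* Allocate the request with BLK_MQ_REQ_NOWAIT */
	NVME_SUBMIT_NOWAIT	= (1 << 1),
	/* Allocate the request from the reserved tag pool */
	NVME_SUBMIT_RESERVED	= (1 << 2),
	/* Clear REQ_FAILFAST_DRIVER so the command may be retried */
	NVME_SUBMIT_RETRY	= (1 << 3),
};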
@@ -1085,7 +1130,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buffer, unsigned bufflen)
 {
 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -1560,7 +1605,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
 	c.features.dword11 = cpu_to_le32(dword11);
 
 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
-			buffer, buflen, NVME_QID_ANY, 0, 0);
+			buffer, buflen, NVME_QID_ANY, 0);
 	if (ret >= 0 && result)
 		*result = le32_to_cpu(res.u32);
 	return ret;
@@ -2172,7 +2217,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
 	cmd.common.cdw11 = cpu_to_le32(len);
 
 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
-			NVME_QID_ANY, 1, 0);
+			NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
 }
 
 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
@@ -3651,6 +3696,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 
 	ns->disk = disk;
 	ns->queue = disk->queue;
+	ns->passthru_err_log_enabled = false;
 
 	if (ctrl->opts && ctrl->opts->data_digest)
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
@@ -3714,6 +3760,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 	nvme_mpath_add_disk(ns, info->anagrpid);
 	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
 
+	/*
+	 * Set ns->disk->device->driver_data to ns so we can access
+	 * ns->passthru_err_log_enabled in nvme_passthru_err_log_enabled_store()
+	 * and nvme_passthru_err_log_enabled_show().
+	 */
+	dev_set_drvdata(disk_to_dev(ns->disk), ns);
+
 	return;
 
 out_cleanup_ns_from_list:
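The show/store handlers named in the comment are not part of this file's diff; a hedged sketch of what they plausibly look like, given that dev_set_drvdata() above makes the namespace reachable from the gendisk's device (the handler bodies are assumptions):

/* Hypothetical sysfs handlers; the names come from the comment above,
 * the bodies are an assumption for illustration. */
static ssize_t nvme_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ns *ns = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", ns->passthru_err_log_enabled);
}

static ssize_t nvme_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ns *ns = dev_get_drvdata(dev);
	bool enabled;

	if (kstrtobool(buf, &enabled))
		return -EINVAL;

	ns->passthru_err_log_enabled = enabled;
	return count;
}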
@@ -4514,6 +4567,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 	int ret;
 
 	WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+	ctrl->passthru_err_log_enabled = false;
 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
 	spin_lock_init(&ctrl->lock);
 	mutex_init(&ctrl->scan_lock);
@@ -4851,5 +4905,6 @@ static void __exit nvme_core_exit(void)
 
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host core framework");
 module_init(nvme_core_init);
 module_exit(nvme_core_exit);