@@ -589,10 +589,11 @@ static inline int was_interrupted(int result)
 }
 
 /*
- * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
- * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
+ * Returns BLK_STS_IOERR if sending failed.
  */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
+                                 int index)
 {
         struct request *req = blk_mq_rq_from_pdu(cmd);
         struct nbd_config *config = nbd->config;
@@ -614,13 +615,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
         type = req_to_nbd_cmd_type(req);
         if (type == U32_MAX)
-                return -EIO;
+                return BLK_STS_IOERR;
 
         if (rq_data_dir(req) == WRITE &&
             (config->flags & NBD_FLAG_READ_ONLY)) {
                 dev_err_ratelimited(disk_to_dev(nbd->disk),
                                     "Write on read-only\n");
-                return -EIO;
+                return BLK_STS_IOERR;
         }
 
         if (req->cmd_flags & REQ_FUA)
@@ -674,11 +675,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                                 nsock->sent = sent;
                         }
                         set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-                        return (__force int)BLK_STS_RESOURCE;
+                        return BLK_STS_RESOURCE;
                 }
                 dev_err_ratelimited(disk_to_dev(nbd->disk),
                         "Send control failed (result %d)\n", result);
-                return -EAGAIN;
+                goto requeue;
         }
 send_pages:
         if (type != NBD_CMD_WRITE)
@@ -715,12 +716,12 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                                 nsock->pending = req;
                                 nsock->sent = sent;
                                 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-                                return (__force int)BLK_STS_RESOURCE;
+                                return BLK_STS_RESOURCE;
                         }
                         dev_err(disk_to_dev(nbd->disk),
                                 "Send data failed (result %d)\n",
                                 result);
-                        return -EAGAIN;
+                        goto requeue;
                 }
                 /*
                  * The completion might already have come in,
@@ -737,7 +738,16 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
         trace_nbd_payload_sent(req, handle);
         nsock->pending = NULL;
         nsock->sent = 0;
-        return 0;
+        __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+        return BLK_STS_OK;
+
+requeue:
+        /* retry on a different socket */
+        dev_err_ratelimited(disk_to_dev(nbd->disk),
+                            "Request send failed, requeueing\n");
+        nbd_mark_nsock_dead(nbd, nsock, 1);
+        nbd_requeue_cmd(cmd);
+        return BLK_STS_OK;
 }
 
 static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
@@ -1018,7 +1028,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
         struct nbd_device *nbd = cmd->nbd;
         struct nbd_config *config;
         struct nbd_sock *nsock;
-        int ret;
+        blk_status_t ret;
 
         lockdep_assert_held(&cmd->lock);
 
@@ -1072,28 +1082,11 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
                 ret = BLK_STS_OK;
                 goto out;
         }
-        /*
-         * Some failures are related to the link going down, so anything that
-         * returns EAGAIN can be retried on a different socket.
-         */
         ret = nbd_send_cmd(nbd, cmd, index);
-        /*
-         * Access to this flag is protected by cmd->lock, thus it's safe to set
-         * the flag after nbd_send_cmd() succeed to send request to server.
-         */
-        if (!ret)
-                __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
-        else if (ret == -EAGAIN) {
-                dev_err_ratelimited(disk_to_dev(nbd->disk),
-                                    "Request send failed, requeueing\n");
-                nbd_mark_nsock_dead(nbd, nsock, 1);
-                nbd_requeue_cmd(cmd);
-                ret = BLK_STS_OK;
-        }
 out:
         mutex_unlock(&nsock->tx_lock);
         nbd_config_put(nbd);
-        return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+        return ret;
 }
 
 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
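
Taken together, nbd_send_cmd() now reports every outcome as a blk_status_t and handles the "retry on a different socket" case itself via the new requeue: label, so nbd_handle_cmd() simply passes the status through. The stand-alone sketch below models that flow in plain C; the enum values and helper names (model_send_cmd, model_handle_cmd) are illustrative stand-ins for this note, not kernel API.

#include <stdio.h>

/* Illustrative stand-ins for the blk_status_t values used in the patch. */
enum blk_status { BLK_STS_OK, BLK_STS_IOERR, BLK_STS_RESOURCE };

/* Models nbd_send_cmd() after the patch: every outcome is a blk_status. */
static enum blk_status model_send_cmd(int bad_request, int xmit_result, int interrupted)
{
        if (bad_request)
                return BLK_STS_IOERR;           /* e.g. write to a read-only export */
        if (xmit_result < 0) {
                if (interrupted)
                        return BLK_STS_RESOURCE; /* partial send, retry after a delay */
                /* In the driver this is the "goto requeue" path: the socket is
                 * marked dead and the command requeued on another socket; the
                 * model just reports success, as the real code does. */
                return BLK_STS_OK;
        }
        return BLK_STS_OK;                      /* fully sent, command now in flight */
}

/* Models nbd_handle_cmd() after the patch: the status is passed straight up. */
static enum blk_status model_handle_cmd(int bad_request, int xmit_result, int interrupted)
{
        return model_send_cmd(bad_request, xmit_result, interrupted);
}

int main(void)
{
        printf("%d\n", model_handle_cmd(1, 0, 0));      /* BLK_STS_IOERR */
        printf("%d\n", model_handle_cmd(0, -1, 1));     /* BLK_STS_RESOURCE */
        printf("%d\n", model_handle_cmd(0, -1, 0));     /* requeued, BLK_STS_OK */
        printf("%d\n", model_handle_cmd(0, 0, 0));      /* BLK_STS_OK */
        return 0;
}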