@@ -736,7 +736,6 @@ enum {
 	REQ_F_BUFFER_SELECTED_BIT,
 	REQ_F_COMPLETE_INLINE_BIT,
 	REQ_F_REISSUE_BIT,
-	REQ_F_DONT_REISSUE_BIT,
 	REQ_F_CREDS_BIT,
 	REQ_F_REFCOUNT_BIT,
 	REQ_F_ARM_LTIMEOUT_BIT,
@@ -783,8 +782,6 @@ enum {
 	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
 	/* caller should reissue async */
 	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
-	/* don't attempt request reissue, see io_rw_reissue() */
-	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
 	/* supports async reads */
 	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
 	/* supports async writes */
@@ -2440,13 +2437,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		list_del(&req->inflight_entry);
 
-		if (READ_ONCE(req->result) == -EAGAIN &&
-		    !(req->flags & REQ_F_DONT_REISSUE)) {
-			req->iopoll_completed = 0;
-			io_req_task_queue_reissue(req);
-			continue;
-		}
-
 		__io_cqring_fill_event(ctx, req->user_data, req->result,
 					io_put_rw_kbuf(req));
 		(*nr_events)++;
@@ -2709,10 +2699,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 	if (kiocb->ki_flags & IOCB_WRITE)
 		kiocb_end_write(req);
 	if (unlikely(res != req->result)) {
-		if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
-		    io_resubmit_prep(req))) {
-			req_set_fail(req);
-			req->flags |= REQ_F_DONT_REISSUE;
+		if (res == -EAGAIN && io_rw_should_reissue(req)) {
+			req->flags |= REQ_F_REISSUE;
+			return;
 		}
 	}
 
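After this hunk, io_complete_rw_iopoll() no longer fails a short completion outright: for -EAGAIN where io_rw_should_reissue() agrees, it only marks the request with REQ_F_REISSUE and returns without recording the result, and the resubmit-or-fail decision is left to the regular completion path in kiocb_done() (last hunk below), replacing the -EAGAIN special case deleted from io_iopoll_complete() above. The stand-alone sketch below only illustrates that producer/consumer handoff; it is not kernel code and all demo_* names, flags, and error values are hypothetical simplifications.

/*
 * Minimal sketch of the handoff: the poll-mode completion callback merely
 * flags the request for reissue on -EAGAIN; a later step consumes the flag.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_EAGAIN    (-11)          /* stand-in for -EAGAIN */
#define DEMO_F_REISSUE (1u << 0)      /* stand-in for REQ_F_REISSUE */

struct demo_req {
	unsigned int flags;
	long result;
};

/* mirrors the new io_complete_rw_iopoll() behaviour: mark and bail out */
static void demo_complete_iopoll(struct demo_req *req, long res)
{
	if (res == DEMO_EAGAIN) {
		req->flags |= DEMO_F_REISSUE;
		return;                   /* result intentionally not recorded */
	}
	req->result = res;
}

/* mirrors the consumer in kiocb_done(): clear the flag, retry or fail */
static void demo_finish(struct demo_req *req, bool can_resubmit)
{
	if (req->flags & DEMO_F_REISSUE) {
		req->flags &= ~DEMO_F_REISSUE;
		printf("%s\n", can_resubmit ? "queue reissue" : "fail request");
	}
}

int main(void)
{
	struct demo_req req = { 0, 0 };

	demo_complete_iopoll(&req, DEMO_EAGAIN);
	demo_finish(&req, true);
	return 0;
}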
@@ -2926,7 +2915,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 	struct io_async_rw *io = req->async_data;
-	bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
 	/* add previously done IO, if any */
 	if (io && io->bytes_done > 0) {
@@ -2938,19 +2926,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 
 	if (req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = kiocb->ki_pos;
-	if (ret >= 0 && check_reissue)
+	if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
 		__io_complete_rw(req, ret, 0, issue_flags);
 	else
 		io_rw_done(kiocb, ret);
 
-	if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+	if (req->flags & REQ_F_REISSUE) {
 		req->flags &= ~REQ_F_REISSUE;
 		if (io_resubmit_prep(req)) {
 			io_req_task_queue_reissue(req);
 		} else {
+			unsigned int cflags = io_put_rw_kbuf(req);
+			struct io_ring_ctx *ctx = req->ctx;
+
 			req_set_fail(req);
-			__io_req_complete(req, issue_flags, ret,
-					  io_put_rw_kbuf(req));
+			if (issue_flags & IO_URING_F_NONBLOCK) {
+				mutex_lock(&ctx->uring_lock);
+				__io_req_complete(req, issue_flags, ret, cflags);
+				mutex_unlock(&ctx->uring_lock);
+			} else {
+				__io_req_complete(req, issue_flags, ret, cflags);
+			}
 		}
 	}
 }
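The kiocb_done() hunk is the consumer side: REQ_F_REISSUE is now checked unconditionally, and if io_resubmit_prep() cannot set up a retry the request is failed and completed on the spot. Because that completion can also run from the nonblocking issue path, the patch takes ctx->uring_lock around __io_req_complete() when IO_URING_F_NONBLOCK is set, presumably because that variant defers the completion into state protected by the ring lock. A minimal stand-alone sketch of that conditional-locking pattern follows; it is not kernel code, pthread_mutex stands in for ctx->uring_lock, and every demo_* name is hypothetical.

/*
 * Sketch of the pattern: the failure completion takes the ring lock only
 * when invoked on the nonblocking path.
 */
#include <pthread.h>
#include <stdio.h>

#define DEMO_F_NONBLOCK (1u << 0)     /* stand-in for IO_URING_F_NONBLOCK */

static pthread_mutex_t demo_ring_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for __io_req_complete() */
static void demo_req_complete(long ret, unsigned int cflags)
{
	printf("completed: ret=%ld cflags=%u\n", ret, cflags);
}

/* stand-in for the reissue-failed branch of kiocb_done() */
static void demo_fail_and_complete(long ret, unsigned int cflags,
				   unsigned int issue_flags)
{
	if (issue_flags & DEMO_F_NONBLOCK) {
		/* nonblocking issue path: take the ring lock for completion */
		pthread_mutex_lock(&demo_ring_lock);
		demo_req_complete(ret, cflags);
		pthread_mutex_unlock(&demo_ring_lock);
	} else {
		/* blocking path: complete directly */
		demo_req_complete(ret, cflags);
	}
}

int main(void)
{
	demo_fail_and_complete(-11, 0, DEMO_F_NONBLOCK);
	return 0;
}

In the patch itself both branches call the same __io_req_complete(); the only difference is whether kiocb_done() has to take ctx->uring_lock first.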