@@ -448,24 +448,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 	return req->link;
 }
 
-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
-	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
-		return NULL;
-	return __io_prep_linked_timeout(req);
-}
-
-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
-{
-	io_queue_linked_timeout(__io_prep_linked_timeout(req));
-}
-
-static inline void io_arm_ltimeout(struct io_kiocb *req)
-{
-	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
-		__io_arm_ltimeout(req);
-}
-
 static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
@@ -518,7 +500,6 @@ static void io_prep_async_link(struct io_kiocb *req)
 
 static void io_queue_iowq(struct io_kiocb *req)
 {
-	struct io_kiocb *link = io_prep_linked_timeout(req);
 	struct io_uring_task *tctx = req->tctx;
 
 	BUG_ON(!tctx);
@@ -543,8 +524,6 @@ static void io_queue_iowq(struct io_kiocb *req)
 
 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
 	io_wq_enqueue(tctx->io_wq, &req->work);
-	if (link)
-		io_queue_linked_timeout(link);
 }
 
 static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw)
@@ -869,6 +848,14 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	struct io_ring_ctx *ctx = req->ctx;
 	bool posted;
 
+	/*
+	 * If multishot has already posted deferred completions, ensure that
+	 * those are flushed first before posting this one. If not, CQEs
+	 * could get reordered.
+	 */
+	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+		__io_submit_flush_completions(ctx);
+
 	lockdep_assert(!io_wq_current_is_worker());
 	lockdep_assert_held(&ctx->uring_lock);
 
@@ -1724,15 +1711,22 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
 	return !!req->file;
 }
 
+#define REQ_ISSUE_SLOW_FLAGS	(REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
+
 static inline int __io_issue_sqe(struct io_kiocb *req,
 				 unsigned int issue_flags,
 				 const struct io_issue_def *def)
 {
 	const struct cred *creds = NULL;
+	struct io_kiocb *link = NULL;
 	int ret;
 
-	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
-		creds = override_creds(req->creds);
+	if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
+		if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
+			creds = override_creds(req->creds);
+		if (req->flags & REQ_F_ARM_LTIMEOUT)
+			link = __io_prep_linked_timeout(req);
+	}
 
 	if (!def->audit_skip)
 		audit_uring_entry(req->opcode);
@@ -1742,8 +1736,12 @@ static inline int __io_issue_sqe(struct io_kiocb *req,
 	if (!def->audit_skip)
 		audit_uring_exit(!ret, ret);
 
-	if (creds)
-		revert_creds(creds);
+	if (unlikely(creds || link)) {
+		if (creds)
+			revert_creds(creds);
+		if (link)
+			io_queue_linked_timeout(link);
+	}
 
 	return ret;
 }
@@ -1769,7 +1767,6 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
 		ret = 0;
-		io_arm_ltimeout(req);
 
 		/* If the op doesn't have a file, we're not polling for it */
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
@@ -1824,8 +1821,6 @@ void io_wq_submit_work(struct io_wq_work *work)
 	else
 		req_ref_get(req);
 
-	io_arm_ltimeout(req);
-
 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
 	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
 fail:
@@ -1941,15 +1936,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 static void io_queue_async(struct io_kiocb *req, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout;
-
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
 		io_req_defer_failed(req, ret);
 		return;
 	}
 
-	linked_timeout = io_prep_linked_timeout(req);
-
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
 		io_kbuf_recycle(req, 0);
@@ -1962,9 +1953,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 	case IO_APOLL_OK:
 		break;
 	}
-
-	if (linked_timeout)
-		io_queue_linked_timeout(linked_timeout);
 }
 
 static inline void io_queue_sqe(struct io_kiocb *req)
static inline void io_queue_sqe (struct io_kiocb * req )
0 commit comments