@@ -1278,6 +1278,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 			  bool cancel_all)
+	__must_hold(&req->ctx->timeout_lock)
 {
 	struct io_kiocb *req;
 
@@ -1293,6 +1294,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 	return false;
 }
 
+static bool io_match_linked(struct io_kiocb *head)
+{
+	struct io_kiocb *req;
+
+	io_for_each_link(req, head) {
+		if (req->flags & REQ_F_INFLIGHT)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+			       bool cancel_all)
+{
+	bool matched;
+
+	if (task && head->task != task)
+		return false;
+	if (cancel_all)
+		return true;
+
+	if (head->flags & REQ_F_LINK_TIMEOUT) {
+		struct io_ring_ctx *ctx = head->ctx;
+
+		/* protect against races with linked timeouts */
+		spin_lock_irq(&ctx->timeout_lock);
+		matched = io_match_linked(head);
+		spin_unlock_irq(&ctx->timeout_lock);
+	} else {
+		matched = io_match_linked(head);
+	}
+	return matched;
+}
+
 static inline bool req_has_async_data(struct io_kiocb *req)
 {
 	return req->flags & REQ_F_ASYNC_DATA;
@@ -4327,6 +4366,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
 		kfree(nxt);
 		if (++i == nbufs)
 			return i;
+		cond_resched();
 	}
 	i++;
 	kfree(buf);
@@ -5699,17 +5739,15 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
 	int posted = 0, i;
 
 	spin_lock(&ctx->completion_lock);
-	spin_lock_irq(&ctx->timeout_lock);
 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
 		struct hlist_head *list;
 
 		list = &ctx->cancel_hash[i];
 		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
-			if (io_match_task(req, tsk, cancel_all))
+			if (io_match_task_safe(req, tsk, cancel_all))
 				posted += io_poll_remove_one(req);
 		}
 	}
-	spin_unlock_irq(&ctx->timeout_lock);
 	spin_unlock(&ctx->completion_lock);
 
 	if (posted)
@@ -6158,6 +6196,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
 		return -EFAULT;
 
+	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+		return -EINVAL;
+
 	data->mode = io_translate_timeout_mode(flags);
 	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
 
@@ -6882,10 +6923,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx,
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
 	struct io_kiocb *prev = req->timeout.prev;
-	int ret;
+	int ret = -ENOENT;
 
 	if (prev) {
-		ret = io_try_cancel_userdata(req, prev->user_data);
+		if (!(req->task->flags & PF_EXITING))
+			ret = io_try_cancel_userdata(req, prev->user_data);
 		io_req_complete_post(req, ret ?: -ETIME, 0);
 		io_put_req(prev);
 	} else {
@@ -9257,10 +9299,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct io_buffer *buf;
 	unsigned long index;
 
-	xa_for_each(&ctx->io_buffers, index, buf) {
+	xa_for_each(&ctx->io_buffers, index, buf)
 		__io_remove_buffers(ctx, buf, index, -1U);
-		cond_resched();
-	}
 }
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
@@ -9564,19 +9604,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_task_cancel *cancel = data;
-	bool ret;
-
-	if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
-		struct io_ring_ctx *ctx = req->ctx;
 
-		/* protect against races with linked timeouts */
-		spin_lock_irq(&ctx->timeout_lock);
-		ret = io_match_task(req, cancel->task, cancel->all);
-		spin_unlock_irq(&ctx->timeout_lock);
-	} else {
-		ret = io_match_task(req, cancel->task, cancel->all);
-	}
-	return ret;
+	return io_match_task_safe(req, cancel->task, cancel->all);
 }
 
 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
@@ -9587,14 +9616,12 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 	LIST_HEAD(list);
 
 	spin_lock(&ctx->completion_lock);
-	spin_lock_irq(&ctx->timeout_lock);
 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-		if (io_match_task(de->req, task, cancel_all)) {
+		if (io_match_task_safe(de->req, task, cancel_all)) {
 			list_cut_position(&list, &ctx->defer_list, &de->list);
 			break;
 		}
 	}
-	spin_unlock_irq(&ctx->timeout_lock);
 	spin_unlock(&ctx->completion_lock);
 	if (list_empty(&list))
 		return false;
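For illustration only, not part of the diff above: a minimal userspace sketch, assuming liburing is installed, of how the new check in io_timeout_prep() surfaces to applications. A timeout SQE carrying a negative timespec is now expected to complete with -EINVAL instead of being handed to the hrtimer; the exact result depends on the kernel the patch is applied to.

/* Hypothetical test program, not from the commit: submits a timeout with
 * tv_sec = -1 via liburing's public helpers and prints the completion result.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = -1, .tv_nsec = 0 };

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		/* With the added validation, res should be -EINVAL (-22). */
		printf("timeout completion: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}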