 			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
 
 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
-				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
-				REQ_F_ASYNC_DATA)
+				REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
 
 #define IO_TCTX_REFS_CACHE_NR	(1U << 10)
 
@@ -500,7 +499,6 @@ struct io_uring_task {
 	const struct io_ring_ctx	*last;
 	struct io_wq			*io_wq;
 	struct percpu_counter		inflight;
-	atomic_t			inflight_tracked;
 	atomic_t			in_idle;
 
 	spinlock_t			task_lock;
@@ -1186,6 +1184,8 @@ static void io_clean_op(struct io_kiocb *req);
 static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 					     unsigned issue_flags);
 static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
+static void io_drop_inflight_file(struct io_kiocb *req);
+static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
 static void __io_queue_sqe(struct io_kiocb *req);
 static void io_rsrc_put_work(struct work_struct *work);
@@ -1433,29 +1433,9 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 			  bool cancel_all)
 	__must_hold(&req->ctx->timeout_lock)
 {
-	struct io_kiocb *req;
-
 	if (task && head->task != task)
 		return false;
-	if (cancel_all)
-		return true;
-
-	io_for_each_link(req, head) {
-		if (req->flags & REQ_F_INFLIGHT)
-			return true;
-	}
-	return false;
-}
-
-static bool io_match_linked(struct io_kiocb *head)
-{
-	struct io_kiocb *req;
-
-	io_for_each_link(req, head) {
-		if (req->flags & REQ_F_INFLIGHT)
-			return true;
-	}
-	return false;
+	return cancel_all;
 }
 
 /*
@@ -1465,24 +1445,9 @@ static bool io_match_linked(struct io_kiocb *head)
 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			       bool cancel_all)
 {
-	bool matched;
-
 	if (task && head->task != task)
 		return false;
-	if (cancel_all)
-		return true;
-
-	if (head->flags & REQ_F_LINK_TIMEOUT) {
-		struct io_ring_ctx *ctx = head->ctx;
-
-		/* protect against races with linked timeouts */
-		spin_lock_irq(&ctx->timeout_lock);
-		matched = io_match_linked(head);
-		spin_unlock_irq(&ctx->timeout_lock);
-	} else {
-		matched = io_match_linked(head);
-	}
-	return matched;
+	return cancel_all;
 }
 
 static inline bool req_has_async_data(struct io_kiocb *req)
@@ -1645,14 +1610,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
 	return req->flags & REQ_F_FIXED_FILE;
 }
 
-static inline void io_req_track_inflight(struct io_kiocb *req)
-{
-	if (!(req->flags & REQ_F_INFLIGHT)) {
-		req->flags |= REQ_F_INFLIGHT;
-		atomic_inc(&current->io_uring->inflight_tracked);
-	}
-}
-
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
 	if (WARN_ON_ONCE(!req->link))
@@ -2563,6 +2520,8 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 
 	WARN_ON_ONCE(!tctx);
 
+	io_drop_inflight_file(req);
+
 	spin_lock_irqsave(&tctx->task_lock, flags);
 	if (priority)
 		wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
@@ -6008,7 +5967,10 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
 		if (!req->result) {
 			struct poll_table_struct pt = { ._key = req->cflags };
 
-			req->result = vfs_poll(req->file, &pt) & req->cflags;
+			if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
+				req->result = -EBADF;
+			else
+				req->result = vfs_poll(req->file, &pt) & req->cflags;
 		}
 
 		/* multishot, just fill an CQE and proceed */
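
io_assign_file() is only forward-declared by this patch; its definition lives elsewhere in fs/io_uring.c and is not part of this hunk. The guard is needed because a poll retry can now run after io_drop_inflight_file() has already put req->file on requeue, so the handler must re-resolve the file before calling vfs_poll() and fail the request with -EBADF if it cannot. Below is a toy, self-contained sketch of that re-assignment guard; every name in it is invented for illustration and it is not the kernel helper.

/*
 * Illustration only: re-resolve a dropped file reference before use,
 * failing with -EBADF when that is not possible.
 */
#include <errno.h>
#include <stdio.h>

struct request {
	FILE *file;		/* may be NULL after the inflight reference was dropped */
	const char *path;	/* enough state kept around to re-resolve the file */
};

static int assign_file(struct request *req)
{
	if (req->file)
		return 0;			/* still attached, nothing to do */
	req->file = fopen(req->path, "r");	/* re-resolve before use */
	return req->file ? 0 : -EBADF;
}

int main(void)
{
	struct request req = { .file = NULL, .path = "/etc/hostname" };

	if (assign_file(&req) < 0)
		return 1;	/* the kernel path sets req->result = -EBADF here */
	printf("re-assigned file for %s\n", req.path);
	fclose(req.file);
	return 0;
}
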
@@ -7226,11 +7188,6 @@ static void io_clean_op(struct io_kiocb *req)
 		kfree(req->apoll);
 		req->apoll = NULL;
 	}
-	if (req->flags & REQ_F_INFLIGHT) {
-		struct io_uring_task *tctx = req->task->io_uring;
-
-		atomic_dec(&tctx->inflight_tracked);
-	}
 	if (req->flags & REQ_F_CREDS)
 		put_cred(req->creds);
 	if (req->flags & REQ_F_ASYNC_DATA) {
@@ -7522,15 +7479,28 @@ static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 	return file;
 }
 
+/*
+ * Drop the file for requeue operations. Only used if req->file is the
+ * io_uring descriptor itself.
+ */
+static void io_drop_inflight_file(struct io_kiocb *req)
+{
+	if (unlikely(req->flags & REQ_F_INFLIGHT)) {
+		fput(req->file);
+		req->file = NULL;
+		req->flags &= ~REQ_F_INFLIGHT;
+	}
+}
+
 static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 {
 	struct file *file = fget(fd);
 
 	trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
 
 	/* we don't allow fixed io_uring files */
-	if (file && unlikely(file->f_op == &io_uring_fops))
-		io_req_track_inflight(req);
+	if (file && file->f_op == &io_uring_fops)
+		req->flags |= REQ_F_INFLIGHT;
 	return file;
 }
 
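
After this hunk, REQ_F_INFLIGHT is set only when req->file is the io_uring file itself (file registration rejects io_uring fds, which is why only io_file_get_normal() needs the check), and io_drop_inflight_file() puts that reference before the request is requeued via task_work. The tracked case is a program that targets its own ring fd with a request, for example a POLL_ADD on the ring. A minimal userspace sketch of that scenario, assuming liburing is available and with error handling trimmed:

/*
 * Submit a POLL_ADD whose target is the io_uring descriptor itself; in the
 * kernel, that request's req->file is the io_uring file and the request
 * gets REQ_F_INFLIGHT.
 */
#include <liburing.h>
#include <poll.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* poll the ring's own fd for readability (i.e. CQE availability) */
	io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
	io_uring_submit(&ring);

	/* nothing completes the poll here; teardown cancels the request
	 * that is holding a reference to the ring file */
	io_uring_queue_exit(&ring);
	return 0;
}
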
@@ -9437,7 +9407,6 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
 	xa_init(&tctx->xa);
 	init_waitqueue_head(&tctx->wait);
 	atomic_set(&tctx->in_idle, 0);
-	atomic_set(&tctx->inflight_tracked, 0);
 	task->io_uring = tctx;
 	spin_lock_init(&tctx->task_lock);
 	INIT_WQ_LIST(&tctx->task_list);
@@ -10630,7 +10599,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
 {
 	if (tracked)
-		return atomic_read(&tctx->inflight_tracked);
+		return 0;
 	return percpu_counter_sum(&tctx->inflight);
 }
 