@@ -1277,6 +1277,8 @@ static int __io_run_local_work_loop(struct llist_node **node,
 					   struct io_tw_state *ts,
 					   int events)
 {
+	int ret = 0;
+
 	while (*node) {
 		struct llist_node *next = (*node)->next;
 		struct io_kiocb *req = container_of(*node, struct io_kiocb,
@@ -1285,27 +1287,27 @@ static int __io_run_local_work_loop(struct llist_node **node,
 					io_poll_task_func, io_req_rw_complete,
 					req, ts);
 		*node = next;
-		if (--events <= 0)
+		if (++ret >= events)
 			break;
 	}
 
-	return events;
+	return ret;
 }
 
 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
-			       int min_events)
+			       int min_events, int max_events)
 {
 	struct llist_node *node;
 	unsigned int loops = 0;
-	int ret, limit;
+	int ret = 0;
 
 	if (WARN_ON_ONCE(ctx->submitter_task != current))
 		return -EEXIST;
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
-	limit = max(IO_LOCAL_TW_DEFAULT_MAX, min_events);
 again:
-	ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, limit);
+	min_events -= ret;
+	ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, max_events);
 	if (ctx->retry_llist.first)
 		goto retry_done;
 
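The hunk above inverts the loop's accounting: rather than counting the events budget down and returning the unused remainder, __io_run_local_work_loop() now counts completions up in ret and returns that directly, and __io_run_local_work() takes the cap as an explicit max_events parameter instead of deriving a limit internally. Below is a minimal standalone sketch of the new convention (plain C, with a counter standing in for the llist of requests; illustrative only, not kernel code):

/*
 * Standalone sketch of the new budget convention: process up to
 * max_events of the pending items and return how many actually ran,
 * mirroring the "++ret >= events" check in the hunk above.
 */
#include <stdio.h>

static int run_items(int pending, int max_events)
{
	int ret = 0;

	while (pending--) {
		/* ... complete one request here ... */
		if (++ret >= max_events)
			break;
	}
	return ret;
}

int main(void)
{
	int pending = 10, done;

	/* 10 pending items with a per-pass cap of 4: passes of 4, 4, 2. */
	while (pending) {
		done = run_items(pending, 4);
		pending -= done;
		printf("processed %d, %d left\n", done, pending);
	}
	return 0;
}

Returning the processed count is what lets the later "ret = limit - ret;" fixup disappear: callers learn how much work ran without reconstructing it from the leftover budget.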
@@ -1314,11 +1316,10 @@ static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
 	 * running the pending items.
 	 */
 	node = llist_reverse_order(llist_del_all(&ctx->work_llist));
-	ret = __io_run_local_work_loop(&node, ts, ret);
+	ret += __io_run_local_work_loop(&node, ts, max_events - ret);
 	ctx->retry_llist.first = node;
 	loops++;
 
-	ret = limit - ret;
 	if (io_run_local_work_continue(ctx, ret, min_events))
 		goto again;
 retry_done:
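Two details in this hunk keep the cap global across both lists: the fresh work_llist pass only receives the residual budget max_events - ret left after draining the retry list, so a single iteration never completes more than max_events items in total; and the "min_events -= ret;" added at the again: label (previous hunk) charges the prior pass's completions against the target, so with min_events = 32 and a first pass that retires 20 items, the retry pass only still owes 12. The "ret = limit - ret;" conversion drops out because ret already holds the processed count.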
@@ -1337,16 +1338,18 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
 
 	if (!io_local_work_pending(ctx))
 		return 0;
-	return __io_run_local_work(ctx, &ts, min_events);
+	return __io_run_local_work(ctx, &ts, min_events,
+				   max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
 }
 
-static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
+static int io_run_local_work(struct io_ring_ctx *ctx, int min_events,
+			     int max_events)
 {
 	struct io_tw_state ts = {};
 	int ret;
 
 	mutex_lock(&ctx->uring_lock);
-	ret = __io_run_local_work(ctx, &ts, min_events);
+	ret = __io_run_local_work(ctx, &ts, min_events, max_events);
 	mutex_unlock(&ctx->uring_lock);
 	return ret;
 }
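With the cap made a parameter, io_run_local_work_locked() preserves the previous behavior by computing max(IO_LOCAL_TW_DEFAULT_MAX, min_events) itself, while io_run_local_work() simply forwards whatever max_events its caller selects; the remaining hunks show each call site making that choice explicitly.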
@@ -2352,7 +2355,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 {
 	if (io_local_work_pending(ctx)) {
 		__set_current_state(TASK_RUNNING);
-		if (io_run_local_work(ctx, INT_MAX) > 0)
+		if (io_run_local_work(ctx, INT_MAX, IO_LOCAL_TW_DEFAULT_MAX) > 0)
 			return 0;
 	}
 	if (io_run_task_work() > 0)
@@ -2515,7 +2518,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 	if (!io_allowed_run_tw(ctx))
 		return -EEXIST;
 	if (io_local_work_pending(ctx))
-		io_run_local_work(ctx, min_events);
+		io_run_local_work(ctx, min_events,
+				  max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
 	io_run_task_work();
 
 	if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
@@ -2586,7 +2590,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 	 * now rather than let the caller do another wait loop.
 	 */
 	if (io_local_work_pending(ctx))
-		io_run_local_work(ctx, nr_wait);
+		io_run_local_work(ctx, nr_wait, nr_wait);
 	io_run_task_work();
 
 	/*
@@ -3098,7 +3102,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 
 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
 	    io_allowed_defer_tw_run(ctx))
-		ret |= io_run_local_work(ctx, INT_MAX) > 0;
+		ret |= io_run_local_work(ctx, INT_MAX, INT_MAX) > 0;
 	ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
 	mutex_lock(&ctx->uring_lock);
 	ret |= io_poll_remove_all(ctx, tctx, cancel_all);
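Taken together, the updated call sites encode distinct batching policies: io_run_task_work_sig() still requests everything (INT_MAX) but caps a single run at IO_LOCAL_TW_DEFAULT_MAX; io_cqring_wait()'s opportunistic flush before waiting keeps the old max(IO_LOCAL_TW_DEFAULT_MAX, min_events) limit; the flush just before returning to userspace passes nr_wait for both arguments so it does no more work than the waiter asked for; and the cancellation path uses INT_MAX for both to drain unconditionally.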