@@ -149,7 +149,6 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 static void io_queue_sqe(struct io_kiocb *req);
 static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
-static __cold void io_fallback_tw(struct io_uring_task *tctx);
 
 struct kmem_cache *req_cachep;
 
@@ -1238,6 +1237,20 @@ static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
 	return cmpxchg(&head->first, old, new);
 }
 
+static __cold void io_fallback_tw(struct io_uring_task *tctx)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+	struct io_kiocb *req;
+
+	while (node) {
+		req = container_of(node, struct io_kiocb, io_task_work.node);
+		node = node->next;
+		if (llist_add(&req->io_task_work.node,
+			      &req->ctx->fallback_llist))
+			schedule_delayed_work(&req->ctx->fallback_work, 1);
+	}
+}
+
 void tctx_task_work(struct callback_head *cb)
 {
 	struct io_tw_state ts = {};
@@ -1279,20 +1292,6 @@ void tctx_task_work(struct callback_head *cb)
 	trace_io_uring_task_work_run(tctx, count, loops);
 }
 
-static __cold void io_fallback_tw(struct io_uring_task *tctx)
-{
-	struct llist_node *node = llist_del_all(&tctx->task_list);
-	struct io_kiocb *req;
-
-	while (node) {
-		req = container_of(node, struct io_kiocb, io_task_work.node);
-		node = node->next;
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
-	}
-}
-
 static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
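
For illustration only, a minimal user-space sketch of the deferral pattern io_fallback_tw() follows: detach an entire list in one step, walk it with container_of(), and re-queue each entry onto its context's fallback list, scheduling work only when that list goes from empty to non-empty. All names below (struct demo_req, struct demo_ctx, list_add(), list_del_all()) are hypothetical stand-ins, not kernel APIs; the kernel's llist primitives are lock-free, which this plain singly linked list does not attempt to reproduce.

/*
 * Illustrative stand-in for the io_fallback_tw() pattern above.
 * Hypothetical types and helpers; not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct lnode {
	struct lnode *next;
};

struct demo_ctx {
	struct lnode *fallback_list;	/* stand-in for ctx->fallback_llist */
	int work_scheduled;		/* stand-in for schedule_delayed_work() */
};

struct demo_req {
	int id;
	struct demo_ctx *ctx;
	struct lnode node;		/* stand-in for io_task_work.node */
};

/* Detach the whole list at once, like llist_del_all(). */
static struct lnode *list_del_all(struct lnode **head)
{
	struct lnode *node = *head;

	*head = NULL;
	return node;
}

/* Push one node; returns 1 if the list was previously empty, like llist_add(). */
static int list_add(struct lnode *node, struct lnode **head)
{
	int was_empty = (*head == NULL);

	node->next = *head;
	*head = node;
	return was_empty;
}

static void fallback_tw(struct lnode **task_list)
{
	struct lnode *node = list_del_all(task_list);
	struct demo_req *req;

	while (node) {
		req = container_of(node, struct demo_req, node);
		node = node->next;
		if (list_add(&req->node, &req->ctx->fallback_list))
			req->ctx->work_scheduled = 1;	/* "schedule" once per context */
	}
}

int main(void)
{
	struct demo_ctx ctx = { 0 };
	struct demo_req reqs[3] = {
		{ .id = 1, .ctx = &ctx },
		{ .id = 2, .ctx = &ctx },
		{ .id = 3, .ctx = &ctx },
	};
	struct lnode *task_list = NULL;

	for (int i = 0; i < 3; i++)
		list_add(&reqs[i].node, &task_list);

	fallback_tw(&task_list);

	printf("work scheduled: %d\n", ctx.work_scheduled);
	for (struct lnode *n = ctx.fallback_list; n; n = n->next)
		printf("fallback req id %d\n",
		       container_of(n, struct demo_req, node)->id);
	return 0;
}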