@@ -270,24 +270,6 @@ struct io_ring_ctx {
 		struct io_alloc_cache	netmsg_cache;
 	} ____cacheline_aligned_in_smp;
 
-	/* IRQ completion list, under ->completion_lock */
-	struct io_wq_work_list	locked_free_list;
-	unsigned int		locked_free_nr;
-
-	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
-	struct io_sq_data	*sq_data;	/* if using sq thread polling */
-
-	struct wait_queue_head	sqo_sq_wait;
-	struct list_head	sqd_list;
-
-	unsigned long		check_cq;
-
-	unsigned int		file_alloc_start;
-	unsigned int		file_alloc_end;
-
-	struct xarray		personalities;
-	u32			pers_next;
-
 	struct {
 		/*
 		 * We cache a range of free CQEs we can use, once exhausted it
@@ -332,6 +314,24 @@ struct io_ring_ctx {
 		unsigned		cq_last_tm_flush;
 	} ____cacheline_aligned_in_smp;
 
+	/* IRQ completion list, under ->completion_lock */
+	struct io_wq_work_list	locked_free_list;
+	unsigned int		locked_free_nr;
+
+	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
+	struct io_sq_data	*sq_data;	/* if using sq thread polling */
+
+	struct wait_queue_head	sqo_sq_wait;
+	struct list_head	sqd_list;
+
+	unsigned long		check_cq;
+
+	unsigned int		file_alloc_start;
+	unsigned int		file_alloc_end;
+
+	struct xarray		personalities;
+	u32			pers_next;
+
 	/* Keep this last, we don't need it for the fast path */
 	struct wait_queue_head	poll_wq;
 	struct io_restriction	restrictions;
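
The diff moves fields that are not needed in the fast path out from between the two ____cacheline_aligned_in_smp sections, grouping them with the cold tail of io_ring_ctx so the submission- and completion-side hot state each keep their own cache lines. Below is a minimal userspace sketch of that cacheline-grouping idea, assuming a 64-byte cache line; cacheline_aligned stands in for the kernel's ____cacheline_aligned_in_smp, and the struct and field names are invented for illustration, not the real io_ring_ctx layout.

/*
 * Not from the patch above: an illustrative sketch of keeping hot
 * state on dedicated cache lines and parking cold fields after it.
 */
#include <stdio.h>
#include <stddef.h>

#define cacheline_aligned __attribute__((aligned(64)))

struct ctx {
	/* hot: submission-side state, on its own cache line */
	struct {
		unsigned long	sq_head;
		unsigned long	sq_tail;
	} submit_state cacheline_aligned;

	/* hot: completion-side state, on its own cache line */
	struct {
		unsigned long	cq_head;
		unsigned long	cq_tail;
	} completion_state cacheline_aligned;

	/*
	 * cold: setup/teardown-only fields grouped after the hot
	 * sections rather than wedged between them
	 */
	unsigned int	file_alloc_start;
	unsigned int	file_alloc_end;
};

int main(void)
{
	/* the offsets show each hot section starting on a 64-byte boundary */
	printf("submit_state     @ %3zu\n", offsetof(struct ctx, submit_state));
	printf("completion_state @ %3zu\n", offsetof(struct ctx, completion_state));
	printf("cold fields      @ %3zu\n", offsetof(struct ctx, file_alloc_start));
	return 0;
}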