
Commit 18df385

isilence authored and axboe committed
io_uring: banish non-hot data to end of io_ring_ctx
Let's move all slow-path, setup/init and similar fields to the end of io_ring_ctx; that makes later ctx reorganisation easier. This includes the page arrays used only on tear down, the CQ overflow list, the old provided buffer caches, and the poll hashes used by io-wq.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/fc471b63925a0bf90a34943c4d36163c523cfb43.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent d7f06fe commit 18df385
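The reshuffle follows the usual hot/cold split for cacheline-conscious structs: fields touched on every request stay in the leading ____cacheline_aligned_in_smp sections, while data needed only at setup or tear-down (the gup'ed ring/sqe page arrays, the CQ overflow list, the legacy provided-buffer caches, the poll hashes used by io-wq) moves to the tail so it stops inflating the hot part of the struct. Below is a minimal, self-contained sketch of the same principle, using plain userspace C with __attribute__((aligned)) as a stand-in for the kernel's ____cacheline_aligned_in_smp; the struct and field names are illustrative, not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64

struct example_ctx {
	/* hot: read/written on every submission, starts a fresh cache line */
	unsigned int	sq_head __attribute__((aligned(CACHELINE)));
	unsigned int	sq_entries;

	/* hot: completion side, gets its own cache line as well */
	unsigned int	cq_tail __attribute__((aligned(CACHELINE)));
	unsigned int	cq_entries;

	/* cold: only touched at ring setup/teardown, kept at the end so the
	 * hot sections above stay compact */
	void		**ring_pages;
	unsigned short	n_ring_pages;
};

int main(void)
{
	/* print the layout: the cold fields land after both hot sections */
	printf("sq_head    @ %zu\n", offsetof(struct example_ctx, sq_head));
	printf("cq_tail    @ %zu\n", offsetof(struct example_ctx, cq_tail));
	printf("ring_pages @ %zu\n", offsetof(struct example_ctx, ring_pages));
	return 0;
}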

1 file changed: +19 −18

include/linux/io_uring_types.h

Lines changed: 19 additions & 18 deletions
@@ -211,20 +211,11 @@ struct io_ring_ctx {
 		unsigned int		drain_disabled: 1;
 		unsigned int		compat: 1;
 
-		enum task_work_notify_mode	notify_method;
+		struct task_struct	*submitter_task;
+		struct io_rings		*rings;
+		struct percpu_ref	refs;
 
-		/*
-		 * If IORING_SETUP_NO_MMAP is used, then the below holds
-		 * the gup'ed pages for the two rings, and the sqes.
-		 */
-		unsigned short		n_ring_pages;
-		unsigned short		n_sqe_pages;
-		struct page		**ring_pages;
-		struct page		**sqe_pages;
-
-		struct io_rings		*rings;
-		struct task_struct	*submitter_task;
-		struct percpu_ref	refs;
+		enum task_work_notify_mode	notify_method;
 	} ____cacheline_aligned_in_smp;
 
 	/* submission data */
@@ -262,10 +253,8 @@ struct io_ring_ctx {
 
 		struct io_buffer_list	*io_bl;
 		struct xarray		io_bl_xa;
-		struct list_head	io_buffers_cache;
 
 		struct io_hash_table	cancel_table_locked;
-		struct list_head	cq_overflow_list;
 		struct io_alloc_cache	apoll_cache;
 		struct io_alloc_cache	netmsg_cache;
 	} ____cacheline_aligned_in_smp;
@@ -298,11 +287,8 @@ struct io_ring_ctx {
 		 * manipulate the list, hence no extra locking is needed there.
 		 */
 		struct io_wq_work_list	iopoll_list;
-		struct io_hash_table	cancel_table;
 
 		struct llist_head	work_llist;
-
-		struct list_head	io_buffers_comp;
 	} ____cacheline_aligned_in_smp;
 
 	/* timeouts */
@@ -318,6 +304,10 @@ struct io_ring_ctx {
 	struct io_wq_work_list	locked_free_list;
 	unsigned int		locked_free_nr;
 
+	struct list_head	io_buffers_comp;
+	struct list_head	cq_overflow_list;
+	struct io_hash_table	cancel_table;
+
 	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
 	struct io_sq_data	*sq_data;	/* if using sq thread polling */
 
@@ -332,6 +322,8 @@ struct io_ring_ctx {
 	struct xarray		personalities;
 	u32			pers_next;
 
+	struct list_head	io_buffers_cache;
+
 	/* Keep this last, we don't need it for the fast path */
 	struct wait_queue_head		poll_wq;
 	struct io_restriction		restrictions;
@@ -375,6 +367,15 @@ struct io_ring_ctx {
 	unsigned		sq_thread_idle;
 	/* protected by ->completion_lock */
 	unsigned		evfd_last_cq_tail;
+
+	/*
+	 * If IORING_SETUP_NO_MMAP is used, then the below holds
+	 * the gup'ed pages for the two rings, and the sqes.
+	 */
+	unsigned short		n_ring_pages;
+	unsigned short		n_sqe_pages;
+	struct page		**ring_pages;
+	struct page		**sqe_pages;
 };
 
 struct io_tw_state {
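To keep a layout like this from silently regressing, one common trick is a compile-time guard. The following is a hypothetical, simplified sketch, not something from the commit; toy_ctx and its fields are made-up stand-ins for io_ring_ctx, and it only demonstrates the static_assert/offsetof pattern:

#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */

struct toy_ctx {
	unsigned long	submit_state __attribute__((aligned(64)));	/* hot */
	unsigned long	complete_state __attribute__((aligned(64)));	/* hot */
	void		**ring_pages;	/* cold, teardown-only, kept last */
};

/* Fails the build if a cold field ever moves back in front of the hot data. */
static_assert(offsetof(struct toy_ctx, ring_pages) >
	      offsetof(struct toy_ctx, complete_state),
	      "cold fields must stay after the hot, cacheline-aligned data");

int main(void) { return 0; }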
