Skip to content

Commit c82da38

Browse files
committed
Merge tag 'io_uring-6.14-20250131' of git://git.kernel.dk/linux
Pull more io_uring updates from Jens Axboe: - Series cleaning up the alloc cache changes from this merge window, and then another series on top making it better yet. This also solves an issue with KASAN_EXTRA_INFO, by making io_uring resilient to KASAN using parts of the freed struct for storage - Cleanups and simplifications to buffer cloning and io resource node management - Fix an issue introduced in this merge window where READ/WRITE_ONCE was used on an atomic_t, which made some archs complain - Fix for an errant connect retry when the socket has been shut down - Fix for multishot and provided buffers * tag 'io_uring-6.14-20250131' of git://git.kernel.dk/linux: io_uring/net: don't retry connect operation on EPOLLERR io_uring/rw: simplify io_rw_recycle() io_uring: remove !KASAN guards from cache free io_uring/net: extract io_send_select_buffer() io_uring/net: clean io_msg_copy_hdr() io_uring/net: make io_net_vec_assign() return void io_uring: add alloc_cache.c io_uring: dont ifdef io_alloc_cache_kasan() io_uring: include all deps for alloc_cache.h io_uring: fix multishots with selected buffers io_uring/register: use atomic_read/write for sq_flags migration io_uring/alloc_cache: get rid of _nocache() helper io_uring: get rid of alloc cache init_once handling io_uring/uring_cmd: cleanup struct io_uring_cmd_data layout io_uring/uring_cmd: use cached cmd_op in io_uring_cmd_sock() io_uring/msg_ring: don't leave potentially dangling ->tctx pointer io_uring/rsrc: Move lockdep assert from io_free_rsrc_node() to caller io_uring/rsrc: remove unused parameter ctx for io_rsrc_node_alloc() io_uring: clean up io_uring_register_get_file() io_uring/rsrc: Simplify buffer cloning by locking both rings
2 parents 95d7e82 + 8c8492c commit c82da38

File tree

21 files changed

+272
-243
lines changed

21 files changed

+272
-243
lines changed

include/linux/io_uring/cmd.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,8 @@ struct io_uring_cmd {
1919
};
2020

2121
struct io_uring_cmd_data {
22-
struct io_uring_sqe sqes[2];
2322
void *op_data;
23+
struct io_uring_sqe sqes[2];
2424
};
2525

2626
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)

include/linux/io_uring_types.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,8 @@ struct io_alloc_cache {
222222
void **entries;
223223
unsigned int nr_cached;
224224
unsigned int max_cached;
225-
size_t elem_size;
225+
unsigned int elem_size;
226+
unsigned int init_clear;
226227
};
227228

228229
struct io_ring_ctx {

io_uring/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \
1313
sync.o msg_ring.o advise.o openclose.o \
1414
epoll.o statx.o timeout.o fdinfo.o \
1515
cancel.o waitid.o register.o \
16-
truncate.o memmap.o
16+
truncate.o memmap.o alloc_cache.o
1717
obj-$(CONFIG_IO_WQ) += io-wq.o
1818
obj-$(CONFIG_FUTEX) += futex.o
1919
obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o

io_uring/alloc_cache.c

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
3+
#include "alloc_cache.h"
4+
5+
void io_alloc_cache_free(struct io_alloc_cache *cache,
6+
void (*free)(const void *))
7+
{
8+
void *entry;
9+
10+
if (!cache->entries)
11+
return;
12+
13+
while ((entry = io_alloc_cache_get(cache)) != NULL)
14+
free(entry);
15+
16+
kvfree(cache->entries);
17+
cache->entries = NULL;
18+
}
19+
20+
/* returns false if the cache was initialized properly */
21+
bool io_alloc_cache_init(struct io_alloc_cache *cache,
22+
unsigned max_nr, unsigned int size,
23+
unsigned int init_bytes)
24+
{
25+
cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
26+
if (!cache->entries)
27+
return true;
28+
29+
cache->nr_cached = 0;
30+
cache->max_cached = max_nr;
31+
cache->elem_size = size;
32+
cache->init_clear = init_bytes;
33+
return false;
34+
}
35+
36+
void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp)
37+
{
38+
void *obj;
39+
40+
obj = kmalloc(cache->elem_size, gfp);
41+
if (obj && cache->init_clear)
42+
memset(obj, 0, cache->init_clear);
43+
return obj;
44+
}

io_uring/alloc_cache.h

Lines changed: 33 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,30 @@
11
#ifndef IOU_ALLOC_CACHE_H
22
#define IOU_ALLOC_CACHE_H
33

4+
#include <linux/io_uring_types.h>
5+
46
/*
57
* Don't allow the cache to grow beyond this size.
68
*/
79
#define IO_ALLOC_CACHE_MAX 128
810

11+
void io_alloc_cache_free(struct io_alloc_cache *cache,
12+
void (*free)(const void *));
13+
bool io_alloc_cache_init(struct io_alloc_cache *cache,
14+
unsigned max_nr, unsigned int size,
15+
unsigned int init_bytes);
16+
17+
void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
18+
19+
static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
20+
{
21+
if (IS_ENABLED(CONFIG_KASAN)) {
22+
kfree(*iov);
23+
*iov = NULL;
24+
*nr = 0;
25+
}
26+
}
27+
928
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
1029
void *entry)
1130
{
@@ -23,52 +42,30 @@ static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
2342
if (cache->nr_cached) {
2443
void *entry = cache->entries[--cache->nr_cached];
2544

45+
/*
46+
* If KASAN is enabled, always clear the initial bytes that
47+
* must be zeroed post alloc, in case any of them overlap
48+
* with KASAN storage.
49+
*/
50+
#if defined(CONFIG_KASAN)
2651
kasan_mempool_unpoison_object(entry, cache->elem_size);
52+
if (cache->init_clear)
53+
memset(entry, 0, cache->init_clear);
54+
#endif
2755
return entry;
2856
}
2957

3058
return NULL;
3159
}
3260

33-
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp,
34-
void (*init_once)(void *obj))
61+
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
3562
{
36-
if (unlikely(!cache->nr_cached)) {
37-
void *obj = kmalloc(cache->elem_size, gfp);
63+
void *obj;
3864

39-
if (obj && init_once)
40-
init_once(obj);
65+
obj = io_alloc_cache_get(cache);
66+
if (obj)
4167
return obj;
42-
}
43-
return io_alloc_cache_get(cache);
68+
return io_cache_alloc_new(cache, gfp);
4469
}
4570

46-
/* returns false if the cache was initialized properly */
47-
static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
48-
unsigned max_nr, size_t size)
49-
{
50-
cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
51-
if (cache->entries) {
52-
cache->nr_cached = 0;
53-
cache->max_cached = max_nr;
54-
cache->elem_size = size;
55-
return false;
56-
}
57-
return true;
58-
}
59-
60-
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
61-
void (*free)(const void *))
62-
{
63-
void *entry;
64-
65-
if (!cache->entries)
66-
return;
67-
68-
while ((entry = io_alloc_cache_get(cache)) != NULL)
69-
free(entry);
70-
71-
kvfree(cache->entries);
72-
cache->entries = NULL;
73-
}
7471
#endif

io_uring/filetable.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
6868
if (slot_index >= ctx->file_table.data.nr)
6969
return -EINVAL;
7070

71-
node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
71+
node = io_rsrc_node_alloc(IORING_RSRC_FILE);
7272
if (!node)
7373
return -ENOMEM;
7474

io_uring/futex.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ struct io_futex_data {
3636
bool io_futex_cache_init(struct io_ring_ctx *ctx)
3737
{
3838
return io_alloc_cache_init(&ctx->futex_cache, IO_FUTEX_ALLOC_CACHE_MAX,
39-
sizeof(struct io_futex_data));
39+
sizeof(struct io_futex_data), 0);
4040
}
4141

4242
void io_futex_cache_free(struct io_ring_ctx *ctx)
@@ -320,7 +320,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
320320
}
321321

322322
io_ring_submit_lock(ctx, issue_flags);
323-
ifd = io_cache_alloc(&ctx->futex_cache, GFP_NOWAIT, NULL);
323+
ifd = io_cache_alloc(&ctx->futex_cache, GFP_NOWAIT);
324324
if (!ifd) {
325325
ret = -ENOMEM;
326326
goto done_unlock;

io_uring/io_uring.c

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -315,16 +315,18 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
315315
INIT_LIST_HEAD(&ctx->cq_overflow_list);
316316
INIT_LIST_HEAD(&ctx->io_buffers_cache);
317317
ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
318-
sizeof(struct async_poll));
318+
sizeof(struct async_poll), 0);
319319
ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
320-
sizeof(struct io_async_msghdr));
320+
sizeof(struct io_async_msghdr),
321+
offsetof(struct io_async_msghdr, clear));
321322
ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
322-
sizeof(struct io_async_rw));
323+
sizeof(struct io_async_rw),
324+
offsetof(struct io_async_rw, clear));
323325
ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
324-
sizeof(struct io_uring_cmd_data));
326+
sizeof(struct io_uring_cmd_data), 0);
325327
spin_lock_init(&ctx->msg_lock);
326328
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
327-
sizeof(struct io_kiocb));
329+
sizeof(struct io_kiocb), 0);
328330
ret |= io_futex_cache_init(ctx);
329331
if (ret)
330332
goto free_ref;

io_uring/io_uring.h

Lines changed: 8 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -226,21 +226,16 @@ static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
226226
}
227227

228228
static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
229-
struct io_kiocb *req,
230-
void (*init_once)(void *obj))
229+
struct io_kiocb *req)
231230
{
232-
req->async_data = io_cache_alloc(cache, GFP_KERNEL, init_once);
233-
if (req->async_data)
234-
req->flags |= REQ_F_ASYNC_DATA;
235-
return req->async_data;
236-
}
231+
if (cache) {
232+
req->async_data = io_cache_alloc(cache, GFP_KERNEL);
233+
} else {
234+
const struct io_issue_def *def = &io_issue_defs[req->opcode];
237235

238-
static inline void *io_uring_alloc_async_data_nocache(struct io_kiocb *req)
239-
{
240-
const struct io_issue_def *def = &io_issue_defs[req->opcode];
241-
242-
WARN_ON_ONCE(!def->async_size);
243-
req->async_data = kmalloc(def->async_size, GFP_KERNEL);
236+
WARN_ON_ONCE(!def->async_size);
237+
req->async_data = kmalloc(def->async_size, GFP_KERNEL);
238+
}
244239
if (req->async_data)
245240
req->flags |= REQ_F_ASYNC_DATA;
246241
return req->async_data;

io_uring/msg_ring.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,15 +89,15 @@ static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
8989
static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
9090
int res, u32 cflags, u64 user_data)
9191
{
92-
req->tctx = READ_ONCE(ctx->submitter_task->io_uring);
93-
if (!req->tctx) {
92+
if (!READ_ONCE(ctx->submitter_task)) {
9493
kmem_cache_free(req_cachep, req);
9594
return -EOWNERDEAD;
9695
}
9796
req->cqe.user_data = user_data;
9897
io_req_set_res(req, res, cflags);
9998
percpu_ref_get(&ctx->refs);
10099
req->ctx = ctx;
100+
req->tctx = NULL;
101101
req->io_task_work.func = io_msg_tw_complete;
102102
io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
103103
return 0;

0 commit comments

Comments
 (0)