Skip to content

Commit d536123

Browse files
committed
io_uring: drop the old style inflight file tracking
io_uring tracks requests that are referencing an io_uring descriptor to be able to cancel without worrying about loops in the references. Since we now assign the file at execution time, the easier approach is to drop a potentially problematic reference before we punt the request. This eliminates the need to special case these types of files beyond just marking them as such, and simplifies cancelation quite a bit. This also fixes a recent issue where an async punted tee operation with the io_uring descriptor as the output file would crash when attempting to get a reference to the file from the io-wq worker. We could have worked around that, but this is the much cleaner fix. Fixes: 6bf9c47 ("io_uring: defer file assignment") Reported-by: syzbot+c4b9303500a21750b250@syzkaller.appspotmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 6bf9c47 commit d536123

File tree

1 file changed

+27
-58
lines changed

1 file changed

+27
-58
lines changed

fs/io_uring.c

Lines changed: 27 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -112,8 +112,7 @@
112112
IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
113113

114114
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
115-
REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
116-
REQ_F_ASYNC_DATA)
115+
REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
117116

118117
#define IO_TCTX_REFS_CACHE_NR (1U << 10)
119118

@@ -500,7 +499,6 @@ struct io_uring_task {
500499
const struct io_ring_ctx *last;
501500
struct io_wq *io_wq;
502501
struct percpu_counter inflight;
503-
atomic_t inflight_tracked;
504502
atomic_t in_idle;
505503

506504
spinlock_t task_lock;
@@ -1186,6 +1184,8 @@ static void io_clean_op(struct io_kiocb *req);
11861184
static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
11871185
unsigned issue_flags);
11881186
static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
1187+
static void io_drop_inflight_file(struct io_kiocb *req);
1188+
static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
11891189
static void __io_queue_sqe(struct io_kiocb *req);
11901190
static void io_rsrc_put_work(struct work_struct *work);
11911191

@@ -1433,29 +1433,9 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
14331433
bool cancel_all)
14341434
__must_hold(&req->ctx->timeout_lock)
14351435
{
1436-
struct io_kiocb *req;
1437-
14381436
if (task && head->task != task)
14391437
return false;
1440-
if (cancel_all)
1441-
return true;
1442-
1443-
io_for_each_link(req, head) {
1444-
if (req->flags & REQ_F_INFLIGHT)
1445-
return true;
1446-
}
1447-
return false;
1448-
}
1449-
1450-
static bool io_match_linked(struct io_kiocb *head)
1451-
{
1452-
struct io_kiocb *req;
1453-
1454-
io_for_each_link(req, head) {
1455-
if (req->flags & REQ_F_INFLIGHT)
1456-
return true;
1457-
}
1458-
return false;
1438+
return cancel_all;
14591439
}
14601440

14611441
/*
@@ -1465,24 +1445,9 @@ static bool io_match_linked(struct io_kiocb *head)
14651445
static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
14661446
bool cancel_all)
14671447
{
1468-
bool matched;
1469-
14701448
if (task && head->task != task)
14711449
return false;
1472-
if (cancel_all)
1473-
return true;
1474-
1475-
if (head->flags & REQ_F_LINK_TIMEOUT) {
1476-
struct io_ring_ctx *ctx = head->ctx;
1477-
1478-
/* protect against races with linked timeouts */
1479-
spin_lock_irq(&ctx->timeout_lock);
1480-
matched = io_match_linked(head);
1481-
spin_unlock_irq(&ctx->timeout_lock);
1482-
} else {
1483-
matched = io_match_linked(head);
1484-
}
1485-
return matched;
1450+
return cancel_all;
14861451
}
14871452

14881453
static inline bool req_has_async_data(struct io_kiocb *req)
@@ -1645,14 +1610,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
16451610
return req->flags & REQ_F_FIXED_FILE;
16461611
}
16471612

1648-
static inline void io_req_track_inflight(struct io_kiocb *req)
1649-
{
1650-
if (!(req->flags & REQ_F_INFLIGHT)) {
1651-
req->flags |= REQ_F_INFLIGHT;
1652-
atomic_inc(&current->io_uring->inflight_tracked);
1653-
}
1654-
}
1655-
16561613
static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
16571614
{
16581615
if (WARN_ON_ONCE(!req->link))
@@ -2563,6 +2520,8 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
25632520

25642521
WARN_ON_ONCE(!tctx);
25652522

2523+
io_drop_inflight_file(req);
2524+
25662525
spin_lock_irqsave(&tctx->task_lock, flags);
25672526
if (priority)
25682527
wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
@@ -6008,7 +5967,10 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
60085967
if (!req->result) {
60095968
struct poll_table_struct pt = { ._key = req->cflags };
60105969

6011-
req->result = vfs_poll(req->file, &pt) & req->cflags;
5970+
if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
5971+
req->result = -EBADF;
5972+
else
5973+
req->result = vfs_poll(req->file, &pt) & req->cflags;
60125974
}
60135975

60145976
/* multishot, just fill a CQE and proceed */
@@ -7226,11 +7188,6 @@ static void io_clean_op(struct io_kiocb *req)
72267188
kfree(req->apoll);
72277189
req->apoll = NULL;
72287190
}
7229-
if (req->flags & REQ_F_INFLIGHT) {
7230-
struct io_uring_task *tctx = req->task->io_uring;
7231-
7232-
atomic_dec(&tctx->inflight_tracked);
7233-
}
72347191
if (req->flags & REQ_F_CREDS)
72357192
put_cred(req->creds);
72367193
if (req->flags & REQ_F_ASYNC_DATA) {
@@ -7522,15 +7479,28 @@ static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
75227479
return file;
75237480
}
75247481

7482+
/*
7483+
* Drop the file for requeue operations. Only used if req->file is the
7484+
* io_uring descriptor itself.
7485+
*/
7486+
static void io_drop_inflight_file(struct io_kiocb *req)
7487+
{
7488+
if (unlikely(req->flags & REQ_F_INFLIGHT)) {
7489+
fput(req->file);
7490+
req->file = NULL;
7491+
req->flags &= ~REQ_F_INFLIGHT;
7492+
}
7493+
}
7494+
75257495
static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
75267496
{
75277497
struct file *file = fget(fd);
75287498

75297499
trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
75307500

75317501
/* we don't allow fixed io_uring files */
7532-
if (file && unlikely(file->f_op == &io_uring_fops))
7533-
io_req_track_inflight(req);
7502+
if (file && file->f_op == &io_uring_fops)
7503+
req->flags |= REQ_F_INFLIGHT;
75347504
return file;
75357505
}
75367506

@@ -9437,7 +9407,6 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
94379407
xa_init(&tctx->xa);
94389408
init_waitqueue_head(&tctx->wait);
94399409
atomic_set(&tctx->in_idle, 0);
9440-
atomic_set(&tctx->inflight_tracked, 0);
94419410
task->io_uring = tctx;
94429411
spin_lock_init(&tctx->task_lock);
94439412
INIT_WQ_LIST(&tctx->task_list);
@@ -10630,7 +10599,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
1063010599
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
1063110600
{
1063210601
if (tracked)
10633-
return atomic_read(&tctx->inflight_tracked);
10602+
return 0;
1063410603
return percpu_counter_sum(&tctx->inflight);
1063510604
}
1063610605

0 commit comments

Comments
 (0)