
Commit d871198

io_uring/fdinfo: grab ctx->uring_lock around io_uring_show_fdinfo()
Not everything requires locking in there, which is why the 'has_lock' variable exists. But enough does that it's a bit unwieldy to manage. Wrap the whole thing in a ->uring_lock trylock, and just return with no output if we fail to grab it. The existing trylock() will already have greatly diminished utility/output for the failure case.

This fixes an issue with reading the SQE fields, if the ring is being actively resized at the same time.

Reported-by: Jann Horn <jannh@google.com>
Fixes: 79cfe9e ("io_uring/register: add IORING_REGISTER_RESIZE_RINGS")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent f446c63 commit d871198
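
In short, the change splits the dumper into a locked helper plus a thin wrapper that takes ->uring_lock once via trylock. A condensed sketch of the resulting shape (the body of __io_uring_show_fdinfo() is elided here; see the full diff below):

static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
        /* all of the fdinfo output, now emitted with ctx->uring_lock held */
}

__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
{
        struct io_ring_ctx *ctx = file->private_data;

        /*
         * Trylock avoids the ABBA deadlock with the seq_file lock; if the
         * lock can't be taken (e.g. during a ring resize), emit nothing.
         */
        if (mutex_trylock(&ctx->uring_lock)) {
                __io_uring_show_fdinfo(ctx, m);
                mutex_unlock(&ctx->uring_lock);
        }
}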

File tree

1 file changed (+25 -23 lines changed)


io_uring/fdinfo.c

Lines changed: 25 additions & 23 deletions
@@ -86,13 +86,8 @@ static inline void napi_show_fdinfo(struct io_ring_ctx *ctx,
 }
 #endif
 
-/*
- * Caller holds a reference to the file already, we don't need to do
- * anything else to get an extra reference.
- */
-__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
+static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 {
-        struct io_ring_ctx *ctx = file->private_data;
         struct io_overflow_cqe *ocqe;
         struct io_rings *r = ctx->rings;
         struct rusage sq_usage;
@@ -106,7 +101,6 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
         unsigned int sq_entries, cq_entries;
         int sq_pid = -1, sq_cpu = -1;
         u64 sq_total_time = 0, sq_work_time = 0;
-        bool has_lock;
         unsigned int i;
 
         if (ctx->flags & IORING_SETUP_CQE32)
@@ -176,15 +170,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
                 seq_printf(m, "\n");
         }
 
-        /*
-         * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
-         * since fdinfo case grabs it in the opposite direction of normal use
-         * cases. If we fail to get the lock, we just don't iterate any
-         * structures that could be going away outside the io_uring mutex.
-         */
-        has_lock = mutex_trylock(&ctx->uring_lock);
-
-        if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
+        if (ctx->flags & IORING_SETUP_SQPOLL) {
                 struct io_sq_data *sq = ctx->sq_data;
 
                 /*
@@ -206,7 +192,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
         seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
         seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
         seq_printf(m, "UserFiles:\t%u\n", ctx->file_table.data.nr);
-        for (i = 0; has_lock && i < ctx->file_table.data.nr; i++) {
+        for (i = 0; i < ctx->file_table.data.nr; i++) {
                 struct file *f = NULL;
 
                 if (ctx->file_table.data.nodes[i])
@@ -218,7 +204,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
                 }
         }
         seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
-        for (i = 0; has_lock && i < ctx->buf_table.nr; i++) {
+        for (i = 0; i < ctx->buf_table.nr; i++) {
                 struct io_mapped_ubuf *buf = NULL;
 
                 if (ctx->buf_table.nodes[i])
@@ -228,7 +214,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
                 else
                         seq_printf(m, "%5u: <none>\n", i);
         }
-        if (has_lock && !xa_empty(&ctx->personalities)) {
+        if (!xa_empty(&ctx->personalities)) {
                 unsigned long index;
                 const struct cred *cred;
 
@@ -238,7 +224,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
         }
 
         seq_puts(m, "PollList:\n");
-        for (i = 0; has_lock && i < (1U << ctx->cancel_table.hash_bits); i++) {
+        for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
                 struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
                 struct io_kiocb *req;
 
@@ -247,9 +233,6 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
                                    task_work_pending(req->tctx->task));
         }
 
-        if (has_lock)
-                mutex_unlock(&ctx->uring_lock);
-
         seq_puts(m, "CqOverflowList:\n");
         spin_lock(&ctx->completion_lock);
         list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
@@ -262,4 +245,23 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
         spin_unlock(&ctx->completion_lock);
         napi_show_fdinfo(ctx, m);
 }
+
+/*
+ * Caller holds a reference to the file already, we don't need to do
+ * anything else to get an extra reference.
+ */
+__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
+{
+        struct io_ring_ctx *ctx = file->private_data;
+
+        /*
+         * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
+         * since fdinfo case grabs it in the opposite direction of normal use
+         * cases.
+         */
+        if (mutex_trylock(&ctx->uring_lock)) {
+                __io_uring_show_fdinfo(ctx, m);
+                mutex_unlock(&ctx->uring_lock);
+        }
+}
 #endif
