
Commit 6be6d11

Chengming Zhou authored and axboe committed
blk-mq: fix tags UAF when shrinking q->nr_hw_queues
When nr_hw_queues shrinks, we free the excess tags before realloc'ing hw_ctxs for each queue. During that resize we may still need to access those tags, e.g. blk_mq_tag_idle(hctx) accesses the queue's shared tags. This can cause a slab use-after-free, as reported by KASAN. Fix it by moving the release of the excess tags to the end.

Fixes: e1dd7bc ("blk-mq: fix tags leak when shrink nr_hw_queues")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Closes: https://lore.kernel.org/all/CAHj4cs_CK63uoDpGBGZ6DN4OCTpzkR3UaVgK=LX8Owr8ej2ieQ@mail.gmail.com/
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20230908005702.2183908-1-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 0bb80ec commit 6be6d11

File tree

1 file changed, 7 additions and 6 deletions

block/blk-mq.c

Lines changed: 7 additions & 6 deletions
@@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
 	struct blk_mq_tags **new_tags;
 	int i;
 
-	if (set->nr_hw_queues >= new_nr_hw_queues) {
-		for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
-			__blk_mq_free_map_and_rqs(set, i);
+	if (set->nr_hw_queues >= new_nr_hw_queues)
 		goto done;
-	}
 
 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
 			GFP_KERNEL, set->numa_node);
@@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
 	struct request_queue *q;
 	LIST_HEAD(head);
-	int prev_nr_hw_queues;
+	int prev_nr_hw_queues = set->nr_hw_queues;
+	int i;
 
 	lockdep_assert_held(&set->tag_list_lock);
 
@@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister_hctxs(q);
 	}
 
-	prev_nr_hw_queues = set->nr_hw_queues;
 	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
 		goto reregister;
 
@@ -4781,6 +4778,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
+
+	/* Free the excess tags when nr_hw_queues shrink. */
+	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+		__blk_mq_free_map_and_rqs(set, i);
 }
 
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
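For illustration only, here is a minimal user-space C sketch of the ordering issue the commit message describes: freeing the excess per-queue tags before the hctx resize lets the resize loop read freed memory, while deferring the free until after the resize (as the patch does) keeps every access valid. The struct and helper names (fake_tags, alloc_tags, free_excess_tags, resize_hctxs) are made up for this example and are not the kernel's APIs.

#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 8

/* Stand-in for a queue's shared tags; not the kernel's struct blk_mq_tags. */
struct fake_tags {
	int nr_reserved;
};

static struct fake_tags *tags[MAX_QUEUES];

static void alloc_tags(int nr)
{
	for (int i = 0; i < nr; i++) {
		tags[i] = malloc(sizeof(*tags[i]));
		if (!tags[i])
			exit(1);
		tags[i]->nr_reserved = i;
	}
}

/* Corresponds loosely to freeing the map and rqs of the excess queues. */
static void free_excess_tags(int new_nr, int old_nr)
{
	for (int i = new_nr; i < old_nr; i++)
		free(tags[i]);
}

/*
 * Models the hctx resize loop: every old queue is still inspected here
 * (the way blk_mq_tag_idle() touches the queue's shared tags), so this
 * must not run after the excess tags have already been freed.
 */
static void resize_hctxs(int old_nr, int new_nr)
{
	for (int i = 0; i < old_nr; i++)
		printf("queue %d: idle tags (reserved=%d), %s\n",
		       i, tags[i]->nr_reserved,
		       i < new_nr ? "kept" : "freed afterwards");
}

int main(void)
{
	int old_nr = 4, new_nr = 2;

	alloc_tags(old_nr);

	/* Fixed ordering from the patch: resize first, free the excess last. */
	resize_hctxs(old_nr, new_nr);
	free_excess_tags(new_nr, old_nr);

	/*
	 * The pre-fix ordering was the reverse: free_excess_tags() ran
	 * before resize_hctxs(), so resize_hctxs() read tags[i] after
	 * free() -- the slab use-after-free that KASAN reported.
	 */
	return 0;
}

The same reasoning explains why the patch also moves the initialization of prev_nr_hw_queues up: the deferred freeing loop at the end needs the old queue count, which set->nr_hw_queues no longer holds once the resize has completed.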

Comments (0)