Skip to content

Commit 224749b

Browse files
Ming Lei authored and axboe (Jens Axboe) committed
block: Revert "block: Fix potential deadlock while freezing queue and acquiring sysfs_lock"
This reverts commit be26ba9. Commit be26ba9 ("block: Fix potential deadlock while freezing queue and acquiring sysfs_loc") actually reverts commit 22465bb ("blk-mq: move cpuhp callback registering out of q->sysfs_lock"), and causes the original resctrl lockdep warning. So revert it and we need to fix the issue in another way. Cc: Nilay Shroff <nilay@linux.ibm.com> Fixes: be26ba9 ("block: Fix potential deadlock while freezing queue and acquiring sysfs_loc") Signed-off-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20241218101617.3275704-2-ming.lei@redhat.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 51588b1 commit 224749b

File tree

3 files changed

+23
-26
lines changed

3 files changed

+23
-26
lines changed

block/blk-mq-sysfs.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -275,13 +275,15 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
275275
struct blk_mq_hw_ctx *hctx;
276276
unsigned long i;
277277

278-
lockdep_assert_held(&q->sysfs_dir_lock);
279-
278+
mutex_lock(&q->sysfs_dir_lock);
280279
if (!q->mq_sysfs_init_done)
281-
return;
280+
goto unlock;
282281

283282
queue_for_each_hw_ctx(q, hctx, i)
284283
blk_mq_unregister_hctx(hctx);
284+
285+
unlock:
286+
mutex_unlock(&q->sysfs_dir_lock);
285287
}
286288

287289
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
@@ -290,16 +292,18 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
290292
unsigned long i;
291293
int ret = 0;
292294

293-
lockdep_assert_held(&q->sysfs_dir_lock);
294-
295+
mutex_lock(&q->sysfs_dir_lock);
295296
if (!q->mq_sysfs_init_done)
296-
return ret;
297+
goto unlock;
297298

298299
queue_for_each_hw_ctx(q, hctx, i) {
299300
ret = blk_mq_register_hctx(hctx);
300301
if (ret)
301302
break;
302303
}
303304

305+
unlock:
306+
mutex_unlock(&q->sysfs_dir_lock);
307+
304308
return ret;
305309
}

block/blk-mq.c

Lines changed: 11 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -4453,8 +4453,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
44534453
unsigned long i, j;
44544454

44554455
/* protect against switching io scheduler */
4456-
lockdep_assert_held(&q->sysfs_lock);
4457-
4456+
mutex_lock(&q->sysfs_lock);
44584457
for (i = 0; i < set->nr_hw_queues; i++) {
44594458
int old_node;
44604459
int node = blk_mq_get_hctx_node(set, i);
@@ -4487,6 +4486,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
44874486

44884487
xa_for_each_start(&q->hctx_table, j, hctx, j)
44894488
blk_mq_exit_hctx(q, set, hctx, j);
4489+
mutex_unlock(&q->sysfs_lock);
44904490

44914491
/* unregister cpuhp callbacks for exited hctxs */
44924492
blk_mq_remove_hw_queues_cpuhp(q);
@@ -4518,14 +4518,10 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
45184518

45194519
xa_init(&q->hctx_table);
45204520

4521-
mutex_lock(&q->sysfs_lock);
4522-
45234521
blk_mq_realloc_hw_ctxs(set, q);
45244522
if (!q->nr_hw_queues)
45254523
goto err_hctxs;
45264524

4527-
mutex_unlock(&q->sysfs_lock);
4528-
45294525
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
45304526
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
45314527

@@ -4544,7 +4540,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
45444540
return 0;
45454541

45464542
err_hctxs:
4547-
mutex_unlock(&q->sysfs_lock);
45484543
blk_mq_release(q);
45494544
err_exit:
45504545
q->mq_ops = NULL;
@@ -4925,12 +4920,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
49254920
return false;
49264921

49274922
/* q->elevator needs protection from ->sysfs_lock */
4928-
lockdep_assert_held(&q->sysfs_lock);
4923+
mutex_lock(&q->sysfs_lock);
49294924

49304925
/* the check has to be done with holding sysfs_lock */
49314926
if (!q->elevator) {
49324927
kfree(qe);
4933-
goto out;
4928+
goto unlock;
49344929
}
49354930

49364931
INIT_LIST_HEAD(&qe->node);
@@ -4940,7 +4935,9 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
49404935
__elevator_get(qe->type);
49414936
list_add(&qe->node, head);
49424937
elevator_disable(q);
4943-
out:
4938+
unlock:
4939+
mutex_unlock(&q->sysfs_lock);
4940+
49444941
return true;
49454942
}
49464943

@@ -4969,9 +4966,11 @@ static void blk_mq_elv_switch_back(struct list_head *head,
49694966
list_del(&qe->node);
49704967
kfree(qe);
49714968

4969+
mutex_lock(&q->sysfs_lock);
49724970
elevator_switch(q, t);
49734971
/* drop the reference acquired in blk_mq_elv_switch_none */
49744972
elevator_put(t);
4973+
mutex_unlock(&q->sysfs_lock);
49754974
}
49764975

49774976
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4991,11 +4990,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
49914990
if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
49924991
return;
49934992

4994-
list_for_each_entry(q, &set->tag_list, tag_set_list) {
4995-
mutex_lock(&q->sysfs_dir_lock);
4996-
mutex_lock(&q->sysfs_lock);
4993+
list_for_each_entry(q, &set->tag_list, tag_set_list)
49974994
blk_mq_freeze_queue(q);
4998-
}
49994995
/*
50004996
* Switch IO scheduler to 'none', cleaning up the data associated
50014997
* with the previous scheduler. We will switch back once we are done
@@ -5051,11 +5047,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
50515047
list_for_each_entry(q, &set->tag_list, tag_set_list)
50525048
blk_mq_elv_switch_back(&head, q);
50535049

5054-
list_for_each_entry(q, &set->tag_list, tag_set_list) {
5050+
list_for_each_entry(q, &set->tag_list, tag_set_list)
50555051
blk_mq_unfreeze_queue(q);
5056-
mutex_unlock(&q->sysfs_lock);
5057-
mutex_unlock(&q->sysfs_dir_lock);
5058-
}
50595052

50605053
/* Free the excess tags when nr_hw_queues shrink. */
50615054
for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)

block/blk-sysfs.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -706,11 +706,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
706706
if (entry->load_module)
707707
entry->load_module(disk, page, length);
708708

709-
mutex_lock(&q->sysfs_lock);
710709
blk_mq_freeze_queue(q);
710+
mutex_lock(&q->sysfs_lock);
711711
res = entry->store(disk, page, length);
712-
blk_mq_unfreeze_queue(q);
713712
mutex_unlock(&q->sysfs_lock);
713+
blk_mq_unfreeze_queue(q);
714714
return res;
715715
}
716716

0 commit comments

Comments (0)