Skip to content

Commit 61564e7

Browse files
committed
Merge tag 'block-5.16-2021-11-19' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe: - Flip a cap check to avoid a selinux error (Alistair) - Fix for a regression this merge window where we can miss a queue ref put (me) - Un-mark pstore-blk as broken, as the condition that triggered that change has been rectified (Kees) - Queue quiesce and sync fixes (Ming) - FUA insertion fix (Ming) - blk-cgroup error path put fix (Yu) * tag 'block-5.16-2021-11-19' of git://git.kernel.dk/linux-block: blk-mq: don't insert FUA request with data into scheduler queue blk-cgroup: fix missing put device in error path from blkg_conf_prep() block: avoid to quiesce queue in elevator_init_mq Revert "mark pstore-blk as broken" blk-mq: cancel blk-mq dispatch work in both blk_cleanup_queue and disk_release() block: fix missing queue put in error path block: Check ADMIN before NICE for IOPRIO_CLASS_RT
2 parents b100274 + 2b504bd commit 61564e7

File tree

11 files changed

+59
-35
lines changed

11 files changed

+59
-35
lines changed

block/blk-cgroup.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -640,7 +640,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
640640
*/
641641
ret = blk_queue_enter(q, 0);
642642
if (ret)
643-
return ret;
643+
goto fail;
644644

645645
rcu_read_lock();
646646
spin_lock_irq(&q->queue_lock);
@@ -676,13 +676,13 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
676676
new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
677677
if (unlikely(!new_blkg)) {
678678
ret = -ENOMEM;
679-
goto fail;
679+
goto fail_exit_queue;
680680
}
681681

682682
if (radix_tree_preload(GFP_KERNEL)) {
683683
blkg_free(new_blkg);
684684
ret = -ENOMEM;
685-
goto fail;
685+
goto fail_exit_queue;
686686
}
687687

688688
rcu_read_lock();
@@ -722,9 +722,10 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
722722
fail_unlock:
723723
spin_unlock_irq(&q->queue_lock);
724724
rcu_read_unlock();
725+
fail_exit_queue:
726+
blk_queue_exit(q);
725727
fail:
726728
blkdev_put_no_open(bdev);
727-
blk_queue_exit(q);
728729
/*
729730
* If queue was bypassing, we should retry. Do so after a
730731
* short msleep(). It isn't strictly necessary but queue

block/blk-core.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -363,8 +363,10 @@ void blk_cleanup_queue(struct request_queue *q)
363363
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
364364

365365
blk_sync_queue(q);
366-
if (queue_is_mq(q))
366+
if (queue_is_mq(q)) {
367+
blk_mq_cancel_work_sync(q);
367368
blk_mq_exit_queue(q);
369+
}
368370

369371
/*
370372
* In theory, request pool of sched_tags belongs to request queue.

block/blk-flush.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
379379
* @rq is being submitted. Analyze what needs to be done and put it on the
380380
* right queue.
381381
*/
382-
bool blk_insert_flush(struct request *rq)
382+
void blk_insert_flush(struct request *rq)
383383
{
384384
struct request_queue *q = rq->q;
385385
unsigned long fflags = q->queue_flags; /* may change, cache */
@@ -409,7 +409,7 @@ bool blk_insert_flush(struct request *rq)
409409
*/
410410
if (!policy) {
411411
blk_mq_end_request(rq, 0);
412-
return true;
412+
return;
413413
}
414414

415415
BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,8 +420,10 @@ bool blk_insert_flush(struct request *rq)
420420
* for normal execution.
421421
*/
422422
if ((policy & REQ_FSEQ_DATA) &&
423-
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
424-
return false;
423+
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
424+
blk_mq_request_bypass_insert(rq, false, true);
425+
return;
426+
}
425427

426428
/*
427429
* @rq should go through flush machinery. Mark it part of flush
@@ -437,8 +439,6 @@ bool blk_insert_flush(struct request *rq)
437439
spin_lock_irq(&fq->mq_flush_lock);
438440
blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
439441
spin_unlock_irq(&fq->mq_flush_lock);
440-
441-
return true;
442442
}
443443

444444
/**

block/blk-mq.c

Lines changed: 24 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2543,8 +2543,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
25432543
return NULL;
25442544
}
25452545

2546-
static inline bool blk_mq_can_use_cached_rq(struct request *rq,
2547-
struct bio *bio)
2546+
static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
25482547
{
25492548
if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
25502549
return false;
@@ -2565,7 +2564,6 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
25652564
bool checked = false;
25662565

25672566
if (plug) {
2568-
25692567
rq = rq_list_peek(&plug->cached_rq);
25702568
if (rq && rq->q == q) {
25712569
if (unlikely(!submit_bio_checks(bio)))
@@ -2587,12 +2585,14 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
25872585
fallback:
25882586
if (unlikely(bio_queue_enter(bio)))
25892587
return NULL;
2590-
if (!checked && !submit_bio_checks(bio))
2591-
return NULL;
2588+
if (unlikely(!checked && !submit_bio_checks(bio)))
2589+
goto out_put;
25922590
rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
2593-
if (!rq)
2594-
blk_queue_exit(q);
2595-
return rq;
2591+
if (rq)
2592+
return rq;
2593+
out_put:
2594+
blk_queue_exit(q);
2595+
return NULL;
25962596
}
25972597

25982598
/**
@@ -2647,8 +2647,10 @@ void blk_mq_submit_bio(struct bio *bio)
26472647
return;
26482648
}
26492649

2650-
if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
2650+
if (op_is_flush(bio->bi_opf)) {
2651+
blk_insert_flush(rq);
26512652
return;
2653+
}
26522654

26532655
if (plug && (q->nr_hw_queues == 1 ||
26542656
blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
@@ -4417,6 +4419,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq)
44174419
}
44184420
EXPORT_SYMBOL(blk_mq_rq_cpu);
44194421

4422+
void blk_mq_cancel_work_sync(struct request_queue *q)
4423+
{
4424+
if (queue_is_mq(q)) {
4425+
struct blk_mq_hw_ctx *hctx;
4426+
int i;
4427+
4428+
cancel_delayed_work_sync(&q->requeue_work);
4429+
4430+
queue_for_each_hw_ctx(q, hctx, i)
4431+
cancel_delayed_work_sync(&hctx->run_work);
4432+
}
4433+
}
4434+
44204435
static int __init blk_mq_init(void)
44214436
{
44224437
int i;

block/blk-mq.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,6 +128,8 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
128128
void blk_mq_free_plug_rqs(struct blk_plug *plug);
129129
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
130130

131+
void blk_mq_cancel_work_sync(struct request_queue *q);
132+
131133
void blk_mq_release(struct request_queue *q);
132134

133135
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,

block/blk-sysfs.c

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -791,16 +791,6 @@ static void blk_release_queue(struct kobject *kobj)
791791

792792
blk_free_queue_stats(q->stats);
793793

794-
if (queue_is_mq(q)) {
795-
struct blk_mq_hw_ctx *hctx;
796-
int i;
797-
798-
cancel_delayed_work_sync(&q->requeue_work);
799-
800-
queue_for_each_hw_ctx(q, hctx, i)
801-
cancel_delayed_work_sync(&hctx->run_work);
802-
}
803-
804794
blk_exit_queue(q);
805795

806796
blk_queue_free_zone_bitmaps(q);

block/blk.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -271,7 +271,7 @@ void __blk_account_io_done(struct request *req, u64 now);
271271
*/
272272
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
273273

274-
bool blk_insert_flush(struct request *rq);
274+
void blk_insert_flush(struct request *rq);
275275

276276
int elevator_switch_mq(struct request_queue *q,
277277
struct elevator_type *new_e);

block/elevator.c

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -694,12 +694,18 @@ void elevator_init_mq(struct request_queue *q)
694694
if (!e)
695695
return;
696696

697+
/*
698+
* We are called before adding disk, when there isn't any FS I/O,
699+
* so freezing queue plus canceling dispatch work is enough to
700+
* drain any dispatch activities originated from passthrough
701+
* requests, then no need to quiesce queue which may add long boot
702+
* latency, especially when lots of disks are involved.
703+
*/
697704
blk_mq_freeze_queue(q);
698-
blk_mq_quiesce_queue(q);
705+
blk_mq_cancel_work_sync(q);
699706

700707
err = blk_mq_init_sched(q, e);
701708

702-
blk_mq_unquiesce_queue(q);
703709
blk_mq_unfreeze_queue(q);
704710

705711
if (err) {

block/genhd.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1111,6 +1111,8 @@ static void disk_release(struct device *dev)
11111111
might_sleep();
11121112
WARN_ON_ONCE(disk_live(disk));
11131113

1114+
blk_mq_cancel_work_sync(disk->queue);
1115+
11141116
disk_release_events(disk);
11151117
kfree(disk->random);
11161118
xa_destroy(&disk->part_tbl);

block/ioprio.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,14 @@ int ioprio_check_cap(int ioprio)
6969

7070
switch (class) {
7171
case IOPRIO_CLASS_RT:
72-
if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN))
72+
/*
73+
* Originally this only checked for CAP_SYS_ADMIN,
74+
* which was implicitly allowed for pid 0 by security
75+
* modules such as SELinux. Make sure we check
76+
* CAP_SYS_ADMIN first to avoid a denial/avc for
77+
* possibly missing CAP_SYS_NICE permission.
78+
*/
79+
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
7380
return -EPERM;
7481
fallthrough;
7582
/* rt has prio field too */

0 commit comments

Comments
 (0)