Commit 3c7c250

Merge tag 'block-5.17-2022-01-21' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Various little minor fixes that should go into this release:

   - Fix issue with cloned bios and IO accounting (Christoph)
   - Remove redundant assignments (Colin, GuoYong)
   - Fix an issue with the mq-deadline async_depth sysfs interface (me)
   - Fix brd module loading race (Tetsuo)
   - Shared tag map wakeup fix (Laibin)
   - End of bdev read fix (OGAWA)
   - srcu leak fix (Ming)"

* tag 'block-5.17-2022-01-21' of git://git.kernel.dk/linux-block:
  block: fix async_depth sysfs interface for mq-deadline
  block: Fix wrong offset in bio_truncate()
  block: assign bi_bdev for cloned bios in blk_rq_prep_clone
  block: cleanup q->srcu
  block: Remove unnecessary variable assignment
  brd: remove brd_devices_mutex mutex
  aoe: remove redundant assignment on variable n
  loop: remove redundant initialization of pointer node
  blk-mq: fix tag_get wait task can't be awakened
2 parents f3a7822 + 46cdc45

10 files changed, +106 -59 lines changed

block/bio.c
Lines changed: 2 additions & 1 deletion

@@ -569,7 +569,8 @@ static void bio_truncate(struct bio *bio, unsigned new_size)
 				offset = new_size - done;
 			else
 				offset = 0;
-			zero_user(bv.bv_page, offset, bv.bv_len - offset);
+			zero_user(bv.bv_page, bv.bv_offset + offset,
+				  bv.bv_len - offset);
 			truncated = true;
 		}
 		done += bv.bv_len;
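Why adding bv.bv_offset matters: zero_user() takes an offset into the page, but a bio_vec's data may begin part-way into its page, at bv_offset. A minimal sketch with made-up values (hypothetical, not taken from the patch) illustrates the difference:

	/*
	 * Hypothetical bio_vec, chosen only for illustration: the segment
	 * starts 512 bytes into its page and is 512 bytes long, and the
	 * truncation point falls 100 bytes into the segment.
	 */
	struct bio_vec bv = {
		.bv_page   = page,	/* assumed to be a valid struct page * */
		.bv_offset = 512,
		.bv_len    = 512,
	};
	unsigned int offset = 100;

	/* Before the fix: clears page bytes 100..511, which lie outside this
	 * segment, and leaves the segment's stale tail untouched.
	 */
	zero_user(bv.bv_page, offset, bv.bv_len - offset);

	/* After the fix: clears page bytes 612..1023, i.e. the part of this
	 * segment that now lies beyond the truncated size.
	 */
	zero_user(bv.bv_page, bv.bv_offset + offset, bv.bv_len - offset);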

block/blk-mq-tag.c
Lines changed: 33 additions & 7 deletions

@@ -16,6 +16,21 @@
 #include "blk-mq-sched.h"
 #include "blk-mq-tag.h"
 
+/*
+ * Recalculate wakeup batch when tag is shared by hctx.
+ */
+static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+		unsigned int users)
+{
+	if (!users)
+		return;
+
+	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
+			users);
+	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
+			users);
+}
+
 /*
  * If a previously inactive queue goes active, bump the active user count.
  * We need to do this before try to allocate driver tag, then even if fail
@@ -24,18 +39,26 @@
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
+	unsigned int users;
+
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
 
-		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
-		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
-			atomic_inc(&hctx->tags->active_queues);
+		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
+			return true;
+		}
 	} else {
-		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
-		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-			atomic_inc(&hctx->tags->active_queues);
+		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
+			return true;
+		}
 	}
 
+	users = atomic_inc_return(&hctx->tags->active_queues);
+
+	blk_mq_update_wake_batch(hctx->tags, users);
+
 	return true;
 }
 
@@ -56,6 +79,7 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
 	struct blk_mq_tags *tags = hctx->tags;
+	unsigned int users;
 
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
@@ -68,7 +92,9 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 		return;
 	}
 
-	atomic_dec(&tags->active_queues);
+	users = atomic_dec_return(&tags->active_queues);
+
+	blk_mq_update_wake_batch(tags, users);
 
 	blk_mq_tag_wakeup_all(tags, false);
 }

block/blk-mq.c
Lines changed: 1 addition & 0 deletions

@@ -2976,6 +2976,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 		bio = bio_clone_fast(bio_src, gfp_mask, bs);
 		if (!bio)
 			goto free_and_out;
+		bio->bi_bdev = rq->q->disk->part0;
 
 		if (bio_ctr && bio_ctr(bio, bio_src, data))
 			goto free_and_out;

block/blk-sysfs.c
Lines changed: 3 additions & 1 deletion

@@ -811,6 +811,9 @@ static void blk_release_queue(struct kobject *kobj)
 
 	bioset_exit(&q->bio_split);
 
+	if (blk_queue_has_srcu(q))
+		cleanup_srcu_struct(q->srcu);
+
 	ida_simple_remove(&blk_queue_ida, q->id);
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
@@ -887,7 +890,6 @@ int blk_register_queue(struct gendisk *disk)
 	kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
 	mutex_unlock(&q->sysfs_lock);
 
-	ret = 0;
 unlock:
 	mutex_unlock(&q->sysfs_dir_lock);

block/mq-deadline.c
Lines changed: 2 additions & 2 deletions

@@ -865,7 +865,7 @@ SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
-SHOW_INT(deadline_async_depth_show, dd->front_merges);
+SHOW_INT(deadline_async_depth_show, dd->async_depth);
 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
 #undef SHOW_INT
 #undef SHOW_JIFFIES
@@ -895,7 +895,7 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX);
+STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
 #undef STORE_FUNCTION
 #undef STORE_INT
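The async_depth bug was a copy-paste slip: both the show and store handlers for async_depth were generated against dd->front_merges, so reading the async_depth sysfs attribute reported front_merges and writing it silently clobbered front_merges. A simplified sketch of how such a SHOW_INT-style macro expands (hypothetical, not the exact kernel definition) shows why the wrong field argument is easy to miss:

/* Simplified, hypothetical stand-in for the mq-deadline SHOW_INT macro. */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}

/*
 * Expanded with the old argument, deadline_async_depth_show() ends up
 * printing dd->front_merges; with the fixed argument it prints
 * dd->async_depth, and the matching STORE_INT change makes writes land
 * in the right field as well.
 */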

drivers/block/aoe/aoecmd.c
Lines changed: 1 addition & 1 deletion

@@ -122,7 +122,7 @@ newtag(struct aoedev *d)
 	register ulong n;
 
 	n = jiffies & 0xffff;
-	return n |= (++d->lasttag & 0x7fff) << 16;
+	return n | (++d->lasttag & 0x7fff) << 16;
 }
 
 static u32

drivers/block/brd.c
Lines changed: 30 additions & 43 deletions

@@ -362,7 +362,6 @@ __setup("ramdisk_size=", ramdisk_size);
  * (should share code eventually).
  */
 static LIST_HEAD(brd_devices);
-static DEFINE_MUTEX(brd_devices_mutex);
 static struct dentry *brd_debugfs_dir;
 
 static int brd_alloc(int i)
@@ -372,21 +371,14 @@ static int brd_alloc(int i)
 	char buf[DISK_NAME_LEN];
 	int err = -ENOMEM;
 
-	mutex_lock(&brd_devices_mutex);
-	list_for_each_entry(brd, &brd_devices, brd_list) {
-		if (brd->brd_number == i) {
-			mutex_unlock(&brd_devices_mutex);
+	list_for_each_entry(brd, &brd_devices, brd_list)
+		if (brd->brd_number == i)
 			return -EEXIST;
-		}
-	}
 	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
-	if (!brd) {
-		mutex_unlock(&brd_devices_mutex);
+	if (!brd)
 		return -ENOMEM;
-	}
 	brd->brd_number = i;
 	list_add_tail(&brd->brd_list, &brd_devices);
-	mutex_unlock(&brd_devices_mutex);
 
 	spin_lock_init(&brd->brd_lock);
 	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
@@ -429,9 +421,7 @@ static int brd_alloc(int i)
 out_cleanup_disk:
 	blk_cleanup_disk(disk);
 out_free_dev:
-	mutex_lock(&brd_devices_mutex);
 	list_del(&brd->brd_list);
-	mutex_unlock(&brd_devices_mutex);
 	kfree(brd);
 	return err;
 }
@@ -441,15 +431,19 @@ static void brd_probe(dev_t dev)
 	brd_alloc(MINOR(dev) / max_part);
 }
 
-static void brd_del_one(struct brd_device *brd)
+static void brd_cleanup(void)
 {
-	del_gendisk(brd->brd_disk);
-	blk_cleanup_disk(brd->brd_disk);
-	brd_free_pages(brd);
-	mutex_lock(&brd_devices_mutex);
-	list_del(&brd->brd_list);
-	mutex_unlock(&brd_devices_mutex);
-	kfree(brd);
+	struct brd_device *brd, *next;
+
+	debugfs_remove_recursive(brd_debugfs_dir);
+
+	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
+		del_gendisk(brd->brd_disk);
+		blk_cleanup_disk(brd->brd_disk);
+		brd_free_pages(brd);
+		list_del(&brd->brd_list);
+		kfree(brd);
+	}
 }
 
 static inline void brd_check_and_reset_par(void)
@@ -473,9 +467,18 @@ static inline void brd_check_and_reset_par(void)
 
 static int __init brd_init(void)
 {
-	struct brd_device *brd, *next;
 	int err, i;
 
+	brd_check_and_reset_par();
+
+	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
+	for (i = 0; i < rd_nr; i++) {
+		err = brd_alloc(i);
+		if (err)
+			goto out_free;
+	}
+
 	/*
 	 * brd module now has a feature to instantiate underlying device
 	 * structure on-demand, provided that there is an access dev node.
@@ -491,42 +494,26 @@
 	 * dynamically.
 	 */
 
-	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
-		return -EIO;
-
-	brd_check_and_reset_par();
-
-	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
-	for (i = 0; i < rd_nr; i++) {
-		err = brd_alloc(i);
-		if (err)
-			goto out_free;
+	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
+		err = -EIO;
+		goto out_free;
 	}
 
 	pr_info("brd: module loaded\n");
 	return 0;
 
 out_free:
-	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
-	debugfs_remove_recursive(brd_debugfs_dir);
-
-	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
-		brd_del_one(brd);
+	brd_cleanup();
 
 	pr_info("brd: module NOT loaded !!!\n");
 	return err;
 }
 
 static void __exit brd_exit(void)
 {
-	struct brd_device *brd, *next;
 
 	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
-	debugfs_remove_recursive(brd_debugfs_dir);
-
-	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
-		brd_del_one(brd);
+	brd_cleanup();
 
 	pr_info("brd: module unloaded\n");
 }

drivers/block/loop.c
Lines changed: 1 addition & 1 deletion

@@ -820,7 +820,7 @@ static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
 
 static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 {
-	struct rb_node **node = &(lo->worker_tree.rb_node), *parent = NULL;
+	struct rb_node **node, *parent = NULL;
 	struct loop_worker *cur_worker, *worker = NULL;
 	struct work_struct *work;
 	struct list_head *cmd_list;

include/linux/sbitmap.h
Lines changed: 11 additions & 0 deletions

@@ -415,6 +415,17 @@ static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
 	sbitmap_free(&sbq->sb);
 }
 
+/**
+ * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
+ * @sbq: Bitmap queue to recalculate wake batch.
+ * @users: Number of shares.
+ *
+ * Like sbitmap_queue_update_wake_batch(), this will calculate wake batch
+ * by depth. This interface is for HCTX shared tags or queue shared tags.
+ */
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int users);
+
 /**
  * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
  * @sbq: Bitmap queue to resize.

lib/sbitmap.c
Lines changed: 22 additions & 3 deletions

@@ -457,10 +457,9 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
 
-static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
-					    unsigned int depth)
+static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int wake_batch)
 {
-	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
 	int i;
 
 	if (sbq->wake_batch != wake_batch) {
@@ -476,6 +475,26 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
 	}
 }
 
+static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int depth)
+{
+	unsigned int wake_batch;
+
+	wake_batch = sbq_calc_wake_batch(sbq, depth);
+	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
+}
+
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int users)
+{
+	unsigned int wake_batch;
+
+	wake_batch = clamp_val((sbq->sb.depth + users - 1) /
+			users, 4, SBQ_WAKE_BATCH);
+	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
+
 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
 {
 	sbitmap_queue_update_wake_batch(sbq, depth);
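The recalculation divides the bitmap depth by the number of active users before clamping, so each queue's wake batch shrinks to its fair share of tags; with the old full-depth batch, a queue whose share was smaller than the batch could leave waiters sleeping indefinitely. A standalone sketch of the arithmetic (user-space C, assuming SBQ_WAKE_BATCH is 8 as in mainline at the time):

#include <stdio.h>

#define SBQ_WAKE_BATCH	8	/* assumed mainline value */

#define clamp_val(v, lo, hi)	((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

/* Mirrors the per-user calculation: ceil(depth / users), clamped to [4, 8]. */
static unsigned int wake_batch(unsigned int depth, unsigned int users)
{
	return clamp_val((depth + users - 1) / users, 4, SBQ_WAKE_BATCH);
}

int main(void)
{
	unsigned int depth = 256;
	unsigned int users[] = { 1, 8, 32, 64, 128 };

	for (unsigned int i = 0; i < sizeof(users) / sizeof(users[0]); i++)
		printf("depth=%u users=%3u -> wake_batch=%u\n",
		       depth, users[i], wake_batch(depth, users[i]));
	return 0;
}

With one or a few sharers the batch stays at the cap of 8; once 64 or more queues share a 256-tag set, the batch drops to the floor of 4, so even a queue holding only a handful of free tags still generates wakeups.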
