Skip to content

Commit 3c72728

Browse files
committed
Merge tag 'for-6.16/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mikulas Patocka: - better error handling when reloading a table - use generic disable_* functions instead of open coding them - lock queue limits when reading them - remove unneeded kvfree from alloc_targets - fix BLK_FEAT_ATOMIC_WRITES - pass through operations on wrapped inline crypto keys - dm-verity: - use softirq context only when !need_resched() - fix a memory leak if some arguments are specified multiple times - dm-mpath: - interface for explicit probing of active paths - replace spin_lock_irqsave with spin_lock_irq - dm-delay: don't busy-wait in kthread - dm-bufio: remove maximum age based eviction - dm-flakey: various fixes - vdo indexer: don't read request structure after enqueuing - dm-zone: Use bdev_*() helper functions where applicable - dm-mirror: fix a tiny race condition - dm-stripe: small code cleanup * tag 'for-6.16/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (29 commits) dm-stripe: small code cleanup dm-verity: fix a memory leak if some arguments are specified multiple times dm-mirror: fix a tiny race condition dm-table: check BLK_FEAT_ATOMIC_WRITES inside limits_lock dm mpath: replace spin_lock_irqsave with spin_lock_irq dm-mpath: Don't grab work_mutex while probing paths dm-zone: Use bdev_*() helper functions where applicable dm vdo indexer: don't read request structure after enqueuing dm: pass through operations on wrapped inline crypto keys blk-crypto: export wrapped key functions dm-table: Set BLK_FEAT_ATOMIC_WRITES for target queue limits dm mpath: Interface for explicit probing of active paths dm: Allow .prepare_ioctl to handle ioctls directly dm-flakey: make corrupting read bios work dm-flakey: remove useless ERROR_READS check in flakey_end_io dm-flakey: error all IOs when num_features is absent dm-flakey: Clean up parsing messages dm: remove unneeded kvfree from alloc_targets dm-bufio: remove maximum age based eviction dm-verity: use softirq context only when !need_resched() ...
2 parents 0939bd2 + 9f2f631 commit 3c72728

27 files changed

+746
-391
lines changed

block/blk-crypto-profile.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -501,6 +501,7 @@ int blk_crypto_derive_sw_secret(struct block_device *bdev,
501501
blk_crypto_hw_exit(profile);
502502
return err;
503503
}
504+
EXPORT_SYMBOL_GPL(blk_crypto_derive_sw_secret);
504505

505506
int blk_crypto_import_key(struct blk_crypto_profile *profile,
506507
const u8 *raw_key, size_t raw_key_size,
@@ -520,6 +521,7 @@ int blk_crypto_import_key(struct blk_crypto_profile *profile,
520521
blk_crypto_hw_exit(profile);
521522
return ret;
522523
}
524+
EXPORT_SYMBOL_GPL(blk_crypto_import_key);
523525

524526
int blk_crypto_generate_key(struct blk_crypto_profile *profile,
525527
u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
@@ -537,6 +539,7 @@ int blk_crypto_generate_key(struct blk_crypto_profile *profile,
537539
blk_crypto_hw_exit(profile);
538540
return ret;
539541
}
542+
EXPORT_SYMBOL_GPL(blk_crypto_generate_key);
540543

541544
int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
542545
const u8 *lt_key, size_t lt_key_size,
@@ -556,6 +559,7 @@ int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
556559
blk_crypto_hw_exit(profile);
557560
return ret;
558561
}
562+
EXPORT_SYMBOL_GPL(blk_crypto_prepare_key);
559563

560564
/**
561565
* blk_crypto_intersect_capabilities() - restrict supported crypto capabilities

drivers/md/dm-bufio.c

Lines changed: 36 additions & 153 deletions
Original file line numberDiff line numberDiff line change
@@ -40,16 +40,6 @@
4040
#define DM_BUFIO_WRITEBACK_RATIO 3
4141
#define DM_BUFIO_LOW_WATERMARK_RATIO 16
4242

43-
/*
44-
* Check buffer ages in this interval (seconds)
45-
*/
46-
#define DM_BUFIO_WORK_TIMER_SECS 30
47-
48-
/*
49-
* Free buffers when they are older than this (seconds)
50-
*/
51-
#define DM_BUFIO_DEFAULT_AGE_SECS 300
52-
5343
/*
5444
* The nr of bytes of cached data to keep around.
5545
*/
@@ -1057,10 +1047,8 @@ static unsigned long dm_bufio_cache_size_latch;
10571047

10581048
static DEFINE_SPINLOCK(global_spinlock);
10591049

1060-
/*
1061-
* Buffers are freed after this timeout
1062-
*/
1063-
static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
1050+
static unsigned int dm_bufio_max_age; /* No longer does anything */
1051+
10641052
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
10651053

10661054
static unsigned long dm_bufio_peak_allocated;
@@ -1088,7 +1076,6 @@ static LIST_HEAD(dm_bufio_all_clients);
10881076
static DEFINE_MUTEX(dm_bufio_clients_lock);
10891077

10901078
static struct workqueue_struct *dm_bufio_wq;
1091-
static struct delayed_work dm_bufio_cleanup_old_work;
10921079
static struct work_struct dm_bufio_replacement_work;
10931080

10941081

@@ -2680,130 +2667,6 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
26802667

26812668
/*--------------------------------------------------------------*/
26822669

2683-
static unsigned int get_max_age_hz(void)
2684-
{
2685-
unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2686-
2687-
if (max_age > UINT_MAX / HZ)
2688-
max_age = UINT_MAX / HZ;
2689-
2690-
return max_age * HZ;
2691-
}
2692-
2693-
static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2694-
{
2695-
return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2696-
}
2697-
2698-
struct evict_params {
2699-
gfp_t gfp;
2700-
unsigned long age_hz;
2701-
2702-
/*
2703-
* This gets updated with the largest last_accessed (ie. most
2704-
* recently used) of the evicted buffers. It will not be reinitialised
2705-
* by __evict_many(), so you can use it across multiple invocations.
2706-
*/
2707-
unsigned long last_accessed;
2708-
};
2709-
2710-
/*
2711-
* We may not be able to evict this buffer if IO pending or the client
2712-
* is still using it.
2713-
*
2714-
* And if GFP_NOFS is used, we must not do any I/O because we hold
2715-
* dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2716-
* rerouted to different bufio client.
2717-
*/
2718-
static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2719-
{
2720-
struct evict_params *params = context;
2721-
2722-
if (!(params->gfp & __GFP_FS) ||
2723-
(static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2724-
if (test_bit_acquire(B_READING, &b->state) ||
2725-
test_bit(B_WRITING, &b->state) ||
2726-
test_bit(B_DIRTY, &b->state))
2727-
return ER_DONT_EVICT;
2728-
}
2729-
2730-
return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2731-
}
2732-
2733-
static unsigned long __evict_many(struct dm_bufio_client *c,
2734-
struct evict_params *params,
2735-
int list_mode, unsigned long max_count)
2736-
{
2737-
unsigned long count;
2738-
unsigned long last_accessed;
2739-
struct dm_buffer *b;
2740-
2741-
for (count = 0; count < max_count; count++) {
2742-
b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2743-
if (!b)
2744-
break;
2745-
2746-
last_accessed = READ_ONCE(b->last_accessed);
2747-
if (time_after_eq(params->last_accessed, last_accessed))
2748-
params->last_accessed = last_accessed;
2749-
2750-
__make_buffer_clean(b);
2751-
__free_buffer_wake(b);
2752-
2753-
cond_resched();
2754-
}
2755-
2756-
return count;
2757-
}
2758-
2759-
static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2760-
{
2761-
struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2762-
unsigned long retain = get_retain_buffers(c);
2763-
unsigned long count;
2764-
LIST_HEAD(write_list);
2765-
2766-
dm_bufio_lock(c);
2767-
2768-
__check_watermark(c, &write_list);
2769-
if (unlikely(!list_empty(&write_list))) {
2770-
dm_bufio_unlock(c);
2771-
__flush_write_list(&write_list);
2772-
dm_bufio_lock(c);
2773-
}
2774-
2775-
count = cache_total(&c->cache);
2776-
if (count > retain)
2777-
__evict_many(c, &params, LIST_CLEAN, count - retain);
2778-
2779-
dm_bufio_unlock(c);
2780-
}
2781-
2782-
static void cleanup_old_buffers(void)
2783-
{
2784-
unsigned long max_age_hz = get_max_age_hz();
2785-
struct dm_bufio_client *c;
2786-
2787-
mutex_lock(&dm_bufio_clients_lock);
2788-
2789-
__cache_size_refresh();
2790-
2791-
list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2792-
evict_old_buffers(c, max_age_hz);
2793-
2794-
mutex_unlock(&dm_bufio_clients_lock);
2795-
}
2796-
2797-
static void work_fn(struct work_struct *w)
2798-
{
2799-
cleanup_old_buffers();
2800-
2801-
queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2802-
DM_BUFIO_WORK_TIMER_SECS * HZ);
2803-
}
2804-
2805-
/*--------------------------------------------------------------*/
2806-
28072670
/*
28082671
* Global cleanup tries to evict the oldest buffers from across _all_
28092672
* the clients. It does this by repeatedly evicting a few buffers from
@@ -2841,27 +2704,51 @@ static void __insert_client(struct dm_bufio_client *new_client)
28412704
list_add_tail(&new_client->client_list, h);
28422705
}
28432706

2707+
static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2708+
{
2709+
/* In no-sleep mode, we cannot wait on IO. */
2710+
if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) {
2711+
if (test_bit_acquire(B_READING, &b->state) ||
2712+
test_bit(B_WRITING, &b->state) ||
2713+
test_bit(B_DIRTY, &b->state))
2714+
return ER_DONT_EVICT;
2715+
}
2716+
return ER_EVICT;
2717+
}
2718+
28442719
static unsigned long __evict_a_few(unsigned long nr_buffers)
28452720
{
2846-
unsigned long count;
28472721
struct dm_bufio_client *c;
2848-
struct evict_params params = {
2849-
.gfp = GFP_KERNEL,
2850-
.age_hz = 0,
2851-
/* set to jiffies in case there are no buffers in this client */
2852-
.last_accessed = jiffies
2853-
};
2722+
unsigned long oldest_buffer = jiffies;
2723+
unsigned long last_accessed;
2724+
unsigned long count;
2725+
struct dm_buffer *b;
28542726

28552727
c = __pop_client();
28562728
if (!c)
28572729
return 0;
28582730

28592731
dm_bufio_lock(c);
2860-
count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2732+
2733+
for (count = 0; count < nr_buffers; count++) {
2734+
b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL);
2735+
if (!b)
2736+
break;
2737+
2738+
last_accessed = READ_ONCE(b->last_accessed);
2739+
if (time_after_eq(oldest_buffer, last_accessed))
2740+
oldest_buffer = last_accessed;
2741+
2742+
__make_buffer_clean(b);
2743+
__free_buffer_wake(b);
2744+
2745+
cond_resched();
2746+
}
2747+
28612748
dm_bufio_unlock(c);
28622749

28632750
if (count)
2864-
c->oldest_buffer = params.last_accessed;
2751+
c->oldest_buffer = oldest_buffer;
28652752
__insert_client(c);
28662753

28672754
return count;
@@ -2944,10 +2831,7 @@ static int __init dm_bufio_init(void)
29442831
if (!dm_bufio_wq)
29452832
return -ENOMEM;
29462833

2947-
INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
29482834
INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2949-
queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2950-
DM_BUFIO_WORK_TIMER_SECS * HZ);
29512835

29522836
return 0;
29532837
}
@@ -2959,7 +2843,6 @@ static void __exit dm_bufio_exit(void)
29592843
{
29602844
int bug = 0;
29612845

2962-
cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
29632846
destroy_workqueue(dm_bufio_wq);
29642847

29652848
if (dm_bufio_client_count) {
@@ -2996,7 +2879,7 @@ module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
29962879
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
29972880

29982881
module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2999-
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2882+
MODULE_PARM_DESC(max_age_seconds, "No longer does anything");
30002883

30012884
module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
30022885
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

drivers/md/dm-core.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -141,6 +141,7 @@ struct mapped_device {
141141
#ifdef CONFIG_BLK_DEV_ZONED
142142
unsigned int nr_zones;
143143
void *zone_revalidate_map;
144+
struct task_struct *revalidate_map_task;
144145
#endif
145146

146147
#ifdef CONFIG_IMA
@@ -162,9 +163,6 @@ struct mapped_device {
162163
#define DMF_POST_SUSPENDING 8
163164
#define DMF_EMULATE_ZONE_APPEND 9
164165

165-
void disable_discard(struct mapped_device *md);
166-
void disable_write_zeroes(struct mapped_device *md);
167-
168166
static inline sector_t dm_get_size(struct mapped_device *md)
169167
{
170168
return get_capacity(md->disk);

drivers/md/dm-delay.c

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,11 +14,14 @@
1414
#include <linux/bio.h>
1515
#include <linux/slab.h>
1616
#include <linux/kthread.h>
17+
#include <linux/delay.h>
1718

1819
#include <linux/device-mapper.h>
1920

2021
#define DM_MSG_PREFIX "delay"
2122

23+
#define SLEEP_SHIFT 3
24+
2225
struct delay_class {
2326
struct dm_dev *dev;
2427
sector_t start;
@@ -34,6 +37,7 @@ struct delay_c {
3437
struct work_struct flush_expired_bios;
3538
struct list_head delayed_bios;
3639
struct task_struct *worker;
40+
unsigned int worker_sleep_us;
3741
bool may_delay;
3842

3943
struct delay_class read;
@@ -136,6 +140,7 @@ static int flush_worker_fn(void *data)
136140
schedule();
137141
} else {
138142
spin_unlock(&dc->delayed_bios_lock);
143+
fsleep(dc->worker_sleep_us);
139144
cond_resched();
140145
}
141146
}
@@ -212,7 +217,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
212217
{
213218
struct delay_c *dc;
214219
int ret;
215-
unsigned int max_delay;
220+
unsigned int max_delay, min_delay;
216221

217222
if (argc != 3 && argc != 6 && argc != 9) {
218223
ti->error = "Requires exactly 3, 6 or 9 arguments";
@@ -235,7 +240,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
235240
ret = delay_class_ctr(ti, &dc->read, argv);
236241
if (ret)
237242
goto bad;
238-
max_delay = dc->read.delay;
243+
min_delay = max_delay = dc->read.delay;
239244

240245
if (argc == 3) {
241246
ret = delay_class_ctr(ti, &dc->write, argv);
@@ -251,6 +256,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
251256
if (ret)
252257
goto bad;
253258
max_delay = max(max_delay, dc->write.delay);
259+
min_delay = min_not_zero(min_delay, dc->write.delay);
254260

255261
if (argc == 6) {
256262
ret = delay_class_ctr(ti, &dc->flush, argv + 3);
@@ -263,9 +269,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
263269
if (ret)
264270
goto bad;
265271
max_delay = max(max_delay, dc->flush.delay);
272+
min_delay = min_not_zero(min_delay, dc->flush.delay);
266273

267274
out:
268275
if (max_delay < 50) {
276+
if (min_delay >> SLEEP_SHIFT)
277+
dc->worker_sleep_us = 1000;
278+
else
279+
dc->worker_sleep_us = (min_delay * 1000) >> SLEEP_SHIFT;
269280
/*
270281
* In case of small requested delays, use kthread instead of
271282
* timers and workqueue to achieve better latency.
@@ -438,7 +449,7 @@ static int delay_iterate_devices(struct dm_target *ti,
438449

439450
static struct target_type delay_target = {
440451
.name = "delay",
441-
.version = {1, 4, 0},
452+
.version = {1, 5, 0},
442453
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
443454
.module = THIS_MODULE,
444455
.ctr = delay_ctr,

drivers/md/dm-dust.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -534,7 +534,9 @@ static void dust_status(struct dm_target *ti, status_type_t type,
534534
}
535535
}
536536

537-
static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
537+
static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
538+
unsigned int cmd, unsigned long arg,
539+
bool *forward)
538540
{
539541
struct dust_device *dd = ti->private;
540542
struct dm_dev *dev = dd->dev;

0 commit comments

Comments (0)