Skip to content

Commit f4a8871

Browse files
committed
Merge tag 'mm-hotfixes-stable-2023-05-18-15-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "Eight hotfixes. Four are cc:stable, the other four are for post-6.4
  issues, or aren't considered suitable for backporting"

* tag 'mm-hotfixes-stable-2023-05-18-15-52' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  MAINTAINERS: Cleanup Arm Display IP maintainers
  MAINTAINERS: repair pattern in DIALOG SEMICONDUCTOR DRIVERS
  nilfs2: fix use-after-free bug of nilfs_root in nilfs_evict_inode()
  mm: fix zswap writeback race condition
  mm: kfence: fix false positives on big endian
  zsmalloc: move LRU update from zs_map_object() to zs_malloc()
  mm: shrinkers: fix race condition on debugfs cleanup
  maple_tree: make maple state reusable after mas_empty_area()
2 parents 2d1bcbc + c7394fa commit f4a8871

File tree

9 files changed

+72
-52
lines changed

9 files changed

+72
-52
lines changed

MAINTAINERS

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1677,10 +1677,7 @@ F: drivers/power/reset/arm-versatile-reboot.c
16771677
F: drivers/soc/versatile/
16781678

16791679
ARM KOMEDA DRM-KMS DRIVER
1680-
M: James (Qian) Wang <james.qian.wang@arm.com>
16811680
M: Liviu Dudau <liviu.dudau@arm.com>
1682-
M: Mihail Atanassov <mihail.atanassov@arm.com>
1683-
L: Mali DP Maintainers <malidp@foss.arm.com>
16841681
S: Supported
16851682
T: git git://anongit.freedesktop.org/drm/drm-misc
16861683
F: Documentation/devicetree/bindings/display/arm,komeda.yaml
@@ -1701,8 +1698,6 @@ F: include/uapi/drm/panfrost_drm.h
17011698

17021699
ARM MALI-DP DRM DRIVER
17031700
M: Liviu Dudau <liviu.dudau@arm.com>
1704-
M: Brian Starkey <brian.starkey@arm.com>
1705-
L: Mali DP Maintainers <malidp@foss.arm.com>
17061701
S: Supported
17071702
T: git git://anongit.freedesktop.org/drm/drm-misc
17081703
F: Documentation/devicetree/bindings/display/arm,malidp.yaml
@@ -6012,7 +6007,7 @@ W: http://www.dialog-semiconductor.com/products
60126007
F: Documentation/devicetree/bindings/input/da90??-onkey.txt
60136008
F: Documentation/devicetree/bindings/input/dlg,da72??.txt
60146009
F: Documentation/devicetree/bindings/mfd/da90*.txt
6015-
F: Documentation/devicetree/bindings/mfd/da90*.yaml
6010+
F: Documentation/devicetree/bindings/mfd/dlg,da90*.yaml
60166011
F: Documentation/devicetree/bindings/regulator/da92*.txt
60176012
F: Documentation/devicetree/bindings/regulator/dlg,da9*.yaml
60186013
F: Documentation/devicetree/bindings/regulator/slg51000.txt

fs/nilfs2/inode.c

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -917,6 +917,7 @@ void nilfs_evict_inode(struct inode *inode)
917917
struct nilfs_transaction_info ti;
918918
struct super_block *sb = inode->i_sb;
919919
struct nilfs_inode_info *ii = NILFS_I(inode);
920+
struct the_nilfs *nilfs;
920921
int ret;
921922

922923
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
@@ -929,6 +930,23 @@ void nilfs_evict_inode(struct inode *inode)
929930

930931
truncate_inode_pages_final(&inode->i_data);
931932

933+
nilfs = sb->s_fs_info;
934+
if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
935+
/*
936+
* If this inode is about to be disposed after the file system
937+
* has been degraded to read-only due to file system corruption
938+
* or after the writer has been detached, do not make any
939+
* changes that cause writes, just clear it.
940+
* Do this check after read-locking ns_segctor_sem by
941+
* nilfs_transaction_begin() in order to avoid a race with
942+
* the writer detach operation.
943+
*/
944+
clear_inode(inode);
945+
nilfs_clear_inode(inode);
946+
nilfs_transaction_abort(sb);
947+
return;
948+
}
949+
932950
/* TODO: some of the following operations may fail. */
933951
nilfs_truncate_bmap(ii, 0);
934952
nilfs_mark_inode_dirty(inode);

include/linux/shrinker.h

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -107,18 +107,27 @@ extern void synchronize_shrinkers(void);
107107

108108
#ifdef CONFIG_SHRINKER_DEBUG
109109
extern int shrinker_debugfs_add(struct shrinker *shrinker);
110-
extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
110+
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
111+
int *debugfs_id);
112+
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
113+
int debugfs_id);
111114
extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
112115
const char *fmt, ...);
113116
#else /* CONFIG_SHRINKER_DEBUG */
114117
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
115118
{
116119
return 0;
117120
}
118-
static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
121+
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
122+
int *debugfs_id)
119123
{
124+
*debugfs_id = -1;
120125
return NULL;
121126
}
127+
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
128+
int debugfs_id)
129+
{
130+
}
122131
static inline __printf(2, 3)
123132
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
124133
{

lib/maple_tree.c

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5317,15 +5317,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
53175317

53185318
mt = mte_node_type(mas->node);
53195319
pivots = ma_pivots(mas_mn(mas), mt);
5320-
if (offset)
5321-
mas->min = pivots[offset - 1] + 1;
5322-
5323-
if (offset < mt_pivots[mt])
5324-
mas->max = pivots[offset];
5325-
5326-
if (mas->index < mas->min)
5327-
mas->index = mas->min;
5328-
5320+
min = mas_safe_min(mas, pivots, offset);
5321+
if (mas->index < min)
5322+
mas->index = min;
53295323
mas->last = mas->index + size - 1;
53305324
return 0;
53315325
}

mm/kfence/kfence.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
* canary of every 8 bytes is the same. 64-bit memory can be filled and checked
3030
* at a time instead of byte by byte to improve performance.
3131
*/
32-
#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(0x0706050403020100))
32+
#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
3333

3434
/* Maximum stack depth for reports. */
3535
#define KFENCE_STACK_DEPTH 64

mm/shrinker_debug.c

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -237,7 +237,8 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
237237
}
238238
EXPORT_SYMBOL(shrinker_debugfs_rename);
239239

240-
struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
240+
struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
241+
int *debugfs_id)
241242
{
242243
struct dentry *entry = shrinker->debugfs_entry;
243244

@@ -246,14 +247,18 @@ struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
246247
kfree_const(shrinker->name);
247248
shrinker->name = NULL;
248249

249-
if (entry) {
250-
ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
251-
shrinker->debugfs_entry = NULL;
252-
}
250+
*debugfs_id = entry ? shrinker->debugfs_id : -1;
251+
shrinker->debugfs_entry = NULL;
253252

254253
return entry;
255254
}
256255

256+
void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id)
257+
{
258+
debugfs_remove_recursive(debugfs_entry);
259+
ida_free(&shrinker_debugfs_ida, debugfs_id);
260+
}
261+
257262
static int __init shrinker_debugfs_init(void)
258263
{
259264
struct shrinker *shrinker;

mm/vmscan.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -805,6 +805,7 @@ EXPORT_SYMBOL(register_shrinker);
805805
void unregister_shrinker(struct shrinker *shrinker)
806806
{
807807
struct dentry *debugfs_entry;
808+
int debugfs_id;
808809

809810
if (!(shrinker->flags & SHRINKER_REGISTERED))
810811
return;
@@ -814,13 +815,13 @@ void unregister_shrinker(struct shrinker *shrinker)
814815
shrinker->flags &= ~SHRINKER_REGISTERED;
815816
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
816817
unregister_memcg_shrinker(shrinker);
817-
debugfs_entry = shrinker_debugfs_remove(shrinker);
818+
debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
818819
mutex_unlock(&shrinker_mutex);
819820

820821
atomic_inc(&shrinker_srcu_generation);
821822
synchronize_srcu(&shrinker_srcu);
822823

823-
debugfs_remove_recursive(debugfs_entry);
824+
shrinker_debugfs_remove(debugfs_entry, debugfs_id);
824825

825826
kfree(shrinker->nr_deferred);
826827
shrinker->nr_deferred = NULL;

mm/zsmalloc.c

Lines changed: 9 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1331,31 +1331,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
13311331
obj_to_location(obj, &page, &obj_idx);
13321332
zspage = get_zspage(page);
13331333

1334-
#ifdef CONFIG_ZPOOL
1335-
/*
1336-
* Move the zspage to front of pool's LRU.
1337-
*
1338-
* Note that this is swap-specific, so by definition there are no ongoing
1339-
* accesses to the memory while the page is swapped out that would make
1340-
* it "hot". A new entry is hot, then ages to the tail until it gets either
1341-
* written back or swaps back in.
1342-
*
1343-
* Furthermore, map is also called during writeback. We must not put an
1344-
* isolated page on the LRU mid-reclaim.
1345-
*
1346-
* As a result, only update the LRU when the page is mapped for write
1347-
* when it's first instantiated.
1348-
*
1349-
* This is a deviation from the other backends, which perform this update
1350-
* in the allocation function (zbud_alloc, z3fold_alloc).
1351-
*/
1352-
if (mm == ZS_MM_WO) {
1353-
if (!list_empty(&zspage->lru))
1354-
list_del(&zspage->lru);
1355-
list_add(&zspage->lru, &pool->lru);
1356-
}
1357-
#endif
1358-
13591334
/*
13601335
* migration cannot move any zpages in this zspage. Here, pool->lock
13611336
* is too heavy since callers would take some time until they calls
@@ -1525,9 +1500,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
15251500
fix_fullness_group(class, zspage);
15261501
record_obj(handle, obj);
15271502
class_stat_inc(class, ZS_OBJS_INUSE, 1);
1528-
spin_unlock(&pool->lock);
15291503

1530-
return handle;
1504+
goto out;
15311505
}
15321506

15331507
spin_unlock(&pool->lock);
@@ -1550,6 +1524,14 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
15501524

15511525
/* We completely set up zspage so mark them as movable */
15521526
SetZsPageMovable(pool, zspage);
1527+
out:
1528+
#ifdef CONFIG_ZPOOL
1529+
/* Add/move zspage to beginning of LRU */
1530+
if (!list_empty(&zspage->lru))
1531+
list_del(&zspage->lru);
1532+
list_add(&zspage->lru, &pool->lru);
1533+
#endif
1534+
15531535
spin_unlock(&pool->lock);
15541536

15551537
return handle;

mm/zswap.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1020,6 +1020,22 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
10201020
goto fail;
10211021

10221022
case ZSWAP_SWAPCACHE_NEW: /* page is locked */
1023+
/*
1024+
* Having a local reference to the zswap entry doesn't exclude
1025+
* swapping from invalidating and recycling the swap slot. Once
1026+
* the swapcache is secured against concurrent swapping to and
1027+
* from the slot, recheck that the entry is still current before
1028+
* writing.
1029+
*/
1030+
spin_lock(&tree->lock);
1031+
if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
1032+
spin_unlock(&tree->lock);
1033+
delete_from_swap_cache(page_folio(page));
1034+
ret = -ENOMEM;
1035+
goto fail;
1036+
}
1037+
spin_unlock(&tree->lock);
1038+
10231039
/* decompress */
10241040
acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
10251041
dlen = PAGE_SIZE;

0 commit comments

Comments (0)