Skip to content

Commit 190bf7b

Browse files
committed
Merge tag 'mm-hotfixes-stable-2023-08-11-13-44' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "14 hotfixes. 11 of these are cc:stable and the remainder address
  post-6.4 issues, or are not considered suitable for -stable
  backporting"

* tag 'mm-hotfixes-stable-2023-08-11-13-44' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/damon/core: initialize damo_filter->list from damos_new_filter()
  nilfs2: fix use-after-free of nilfs_root in dirtying inodes via iput
  selftests: cgroup: fix test_kmem_basic false positives
  fs/proc/kcore: reinstate bounce buffer for KCORE_TEXT regions
  MAINTAINERS: add maple tree mailing list
  mm: compaction: fix endless looping over same migrate block
  selftests: mm: ksm: fix incorrect evaluation of parameter
  hugetlb: do not clear hugetlb dtor until allocating vmemmap
  mm: memory-failure: avoid false hwpoison page mapped error info
  mm: memory-failure: fix potential unexpected return value from unpoison_memory()
  mm/swapfile: fix wrong swap entry type for hwpoisoned swapcache page
  radix tree test suite: fix incorrect allocation size for pthreads
  crypto, cifs: fix error handling in extract_iter_to_sg()
  zsmalloc: fix races between modifications of fullness and isolated
2 parents 29d99aa + 5f1fc67 commit 190bf7b

File tree

16 files changed

+135
-54
lines changed

16 files changed

+135
-54
lines changed

MAINTAINERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12480,6 +12480,7 @@ F:	net/mctp/

 MAPLE TREE
 M:	Liam R. Howlett <Liam.Howlett@oracle.com>
+L:	maple-tree@lists.infradead.org
 L:	linux-mm@kvack.org
 S:	Supported
 F:	Documentation/core-api/maple_tree.rst

fs/nilfs2/inode.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1101,9 +1101,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)

 int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 {
+	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 	struct buffer_head *ibh;
 	int err;

+	/*
+	 * Do not dirty inodes after the log writer has been detached
+	 * and its nilfs_root struct has been freed.
+	 */
+	if (unlikely(nilfs_purging(nilfs)))
+		return 0;
+
 	err = nilfs_load_inode_block(inode, &ibh);
 	if (unlikely(err)) {
 		nilfs_warn(inode->i_sb,

fs/nilfs2/segment.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2845,6 +2845,7 @@ void nilfs_detach_log_writer(struct super_block *sb)
 		nilfs_segctor_destroy(nilfs->ns_writer);
 		nilfs->ns_writer = NULL;
 	}
+	set_nilfs_purging(nilfs);

 	/* Force to free the list of dirty files */
 	spin_lock(&nilfs->ns_inode_lock);
@@ -2857,4 +2858,5 @@ void nilfs_detach_log_writer(struct super_block *sb)
 	up_write(&nilfs->ns_segctor_sem);

 	nilfs_dispose_list(nilfs, &garbage_list, 1);
+	clear_nilfs_purging(nilfs);
 }

fs/nilfs2/the_nilfs.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ enum {
 	THE_NILFS_DISCONTINUED,	/* 'next' pointer chain has broken */
 	THE_NILFS_GC_RUNNING,	/* gc process is running */
 	THE_NILFS_SB_DIRTY,	/* super block is dirty */
+	THE_NILFS_PURGING,	/* disposing dirty files for cleanup */
 };

 /**
@@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init)
 THE_NILFS_FNS(DISCONTINUED, discontinued)
 THE_NILFS_FNS(GC_RUNNING, gc_running)
 THE_NILFS_FNS(SB_DIRTY, sb_dirty)
+THE_NILFS_FNS(PURGING, purging)

 /*
  * Mount option operations

fs/proc/kcore.c

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -309,6 +309,8 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,

 static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
+	struct file *file = iocb->ki_filp;
+	char *buf = file->private_data;
 	loff_t *fpos = &iocb->ki_pos;
 	size_t phdrs_offset, notes_offset, data_offset;
 	size_t page_offline_frozen = 1;
@@ -555,10 +557,21 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 		case KCORE_VMEMMAP:
 		case KCORE_TEXT:
 			/*
-			 * We use _copy_to_iter() to bypass usermode hardening
-			 * which would otherwise prevent this operation.
+			 * Sadly we must use a bounce buffer here to be able to
+			 * make use of copy_from_kernel_nofault(), as these
+			 * memory regions might not always be mapped on all
+			 * architectures.
 			 */
-			if (_copy_to_iter((char *)start, tsz, iter) != tsz) {
+			if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
+				if (iov_iter_zero(tsz, iter) != tsz) {
+					ret = -EFAULT;
+					goto out;
+				}
+			/*
+			 * We know the bounce buffer is safe to copy from, so
+			 * use _copy_to_iter() directly.
+			 */
+			} else if (_copy_to_iter(buf, tsz, iter) != tsz) {
 				ret = -EFAULT;
 				goto out;
 			}
@@ -595,6 +608,10 @@ static int open_kcore(struct inode *inode, struct file *filp)
 	if (ret)
 		return ret;

+	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!filp->private_data)
+		return -ENOMEM;
+
 	if (kcore_need_update)
 		kcore_update_ram();
 	if (i_size_read(inode) != proc_root_kcore->size) {
@@ -605,9 +622,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
 	return 0;
 }

+static int release_kcore(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
 static const struct proc_ops kcore_proc_ops = {
 	.proc_read_iter	= read_kcore_iter,
 	.proc_open	= open_kcore,
+	.proc_release	= release_kcore,
 	.proc_lseek	= default_llseek,
 };

lib/scatterlist.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1148,7 +1148,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,

 failed:
 	while (sgtable->nents > sgtable->orig_nents)
-		put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+		unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
 	return res;
 }

mm/compaction.c

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -912,11 +912,12 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,

 		/*
 		 * Check if the pageblock has already been marked skipped.
-		 * Only the aligned PFN is checked as the caller isolates
+		 * Only the first PFN is checked as the caller isolates
 		 * COMPACT_CLUSTER_MAX at a time so the second call must
 		 * not falsely conclude that the block should be skipped.
 		 */
-		if (!valid_page && pageblock_aligned(low_pfn)) {
+		if (!valid_page && (pageblock_aligned(low_pfn) ||
+				    low_pfn == cc->zone->zone_start_pfn)) {
 			if (!isolation_suitable(cc, page)) {
 				low_pfn = end_pfn;
 				folio = NULL;
@@ -2002,7 +2003,8 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 		 * before making it "skip" so other compaction instances do
 		 * not scan the same block.
 		 */
-		if (pageblock_aligned(low_pfn) &&
+		if ((pageblock_aligned(low_pfn) ||
+		     low_pfn == cc->zone->zone_start_pfn) &&
 		    !fast_find_block && !isolation_suitable(cc, page))
 			continue;

mm/damon/core.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,7 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type,
 		return NULL;
 	filter->type = type;
 	filter->matching = matching;
+	INIT_LIST_HEAD(&filter->list);
 	return filter;
 }

mm/hugetlb.c

Lines changed: 51 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1579,9 +1579,37 @@ static inline void destroy_compound_gigantic_folio(struct folio *folio,
 						unsigned int order) { }
 #endif

+static inline void __clear_hugetlb_destructor(struct hstate *h,
+						struct folio *folio)
+{
+	lockdep_assert_held(&hugetlb_lock);
+
+	/*
+	 * Very subtle
+	 *
+	 * For non-gigantic pages set the destructor to the normal compound
+	 * page dtor.  This is needed in case someone takes an additional
+	 * temporary ref to the page, and freeing is delayed until they drop
+	 * their reference.
+	 *
+	 * For gigantic pages set the destructor to the null dtor.  This
+	 * destructor will never be called.  Before freeing the gigantic
+	 * page destroy_compound_gigantic_folio will turn the folio into a
+	 * simple group of pages.  After this the destructor does not
+	 * apply.
+	 *
+	 */
+	if (hstate_is_gigantic(h))
+		folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
+	else
+		folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
+}
+
 /*
- * Remove hugetlb folio from lists, and update dtor so that the folio appears
- * as just a compound page.
+ * Remove hugetlb folio from lists.
+ * If vmemmap exists for the folio, update dtor so that the folio appears
+ * as just a compound page.  Otherwise, wait until after allocating vmemmap
+ * to update dtor.
  *
  * A reference is held on the folio, except in the case of demote.
  *
@@ -1612,31 +1640,19 @@ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
 	}

 	/*
-	 * Very subtle
-	 *
-	 * For non-gigantic pages set the destructor to the normal compound
-	 * page dtor.  This is needed in case someone takes an additional
-	 * temporary ref to the page, and freeing is delayed until they drop
-	 * their reference.
-	 *
-	 * For gigantic pages set the destructor to the null dtor.  This
-	 * destructor will never be called.  Before freeing the gigantic
-	 * page destroy_compound_gigantic_folio will turn the folio into a
-	 * simple group of pages.  After this the destructor does not
-	 * apply.
-	 *
-	 * This handles the case where more than one ref is held when and
-	 * after update_and_free_hugetlb_folio is called.
-	 *
-	 * In the case of demote we do not ref count the page as it will soon
-	 * be turned into a page of smaller size.
+	 * We can only clear the hugetlb destructor after allocating vmemmap
+	 * pages.  Otherwise, someone (memory error handling) may try to write
+	 * to tail struct pages.
+	 */
+	if (!folio_test_hugetlb_vmemmap_optimized(folio))
+		__clear_hugetlb_destructor(h, folio);
+
+	/*
+	 * In the case of demote we do not ref count the page as it will soon
+	 * be turned into a page of smaller size.
 	 */
 	if (!demote)
 		folio_ref_unfreeze(folio, 1);
-	if (hstate_is_gigantic(h))
-		folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
-	else
-		folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);

 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[nid]--;
@@ -1705,6 +1721,7 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 {
 	int i;
 	struct page *subpage;
+	bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);

 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
@@ -1735,6 +1752,16 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 	if (unlikely(folio_test_hwpoison(folio)))
 		folio_clear_hugetlb_hwpoison(folio);

+	/*
+	 * If vmemmap pages were allocated above, then we need to clear the
+	 * hugetlb destructor under the hugetlb lock.
+	 */
+	if (clear_dtor) {
+		spin_lock_irq(&hugetlb_lock);
+		__clear_hugetlb_destructor(h, folio);
+		spin_unlock_irq(&hugetlb_lock);
+	}
+
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		subpage = folio_page(folio, i);
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |

mm/ksm.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2784,6 +2784,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		    anon_vma->root == vma->anon_vma->root) {
 			return page;		/* still no need to copy it */
 		}
+		if (PageHWPoison(page))
+			return ERR_PTR(-EHWPOISON);
 		if (!PageUptodate(page))
 			return page;		/* let do_swap_page report the error */


0 commit comments

Comments
 (0)