Skip to content

Commit 4cee37b

Browse files
committed
Merge tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "Nine hotfixes. Six for MM, three for other areas. Four of these
  patches address post-6.0 issues"

* tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  memcg: fix possible use-after-free in memcg_write_event_control()
  MAINTAINERS: update Muchun Song's email
  mm/gup: fix gup_pud_range() for dax
  mmap: fix do_brk_flags() modifying obviously incorrect VMAs
  mm/swap: fix SWP_PFN_BITS with CONFIG_PHYS_ADDR_T_64BIT on 32bit
  tmpfs: fix data loss from failed fallocate
  kselftests: cgroup: update kmem test precision tolerance
  mm: do not BUG_ON missing brk mapping, because userspace can unmap it
  mailmap: update Matti Vaittinen's email address
2 parents 296a7b7 + 4a7ba45 commit 4cee37b

File tree

7 files changed

+29
-19
lines changed

7 files changed

+29
-19
lines changed

.mailmap

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -287,6 +287,7 @@ Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
287287
Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
288288
Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
289289
Matthieu CASTET <castet.matthieu@free.fr>
290+
Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
290291
Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
291292
Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
292293
Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
@@ -372,6 +373,8 @@ Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
372373
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
373374
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
374375
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
376+
Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
377+
Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
375378
Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
376379
Rudolf Marek <R.Marek@sh.cvut.cz>
377380
Rui Saraiva <rmps@joel.ist.utl.pt>

MAINTAINERS

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5299,7 +5299,7 @@ M: Johannes Weiner <hannes@cmpxchg.org>
52995299
M: Michal Hocko <mhocko@kernel.org>
53005300
M: Roman Gushchin <roman.gushchin@linux.dev>
53015301
M: Shakeel Butt <shakeelb@google.com>
5302-
R: Muchun Song <songmuchun@bytedance.com>
5302+
R: Muchun Song <muchun.song@linux.dev>
53035303
L: cgroups@vger.kernel.org
53045304
L: linux-mm@kvack.org
53055305
S: Maintained
@@ -9439,7 +9439,7 @@ F: drivers/net/ethernet/huawei/hinic/
94399439

94409440
HUGETLB SUBSYSTEM
94419441
M: Mike Kravetz <mike.kravetz@oracle.com>
9442-
M: Muchun Song <songmuchun@bytedance.com>
9442+
M: Muchun Song <muchun.song@linux.dev>
94439443
L: linux-mm@kvack.org
94449444
S: Maintained
94459445
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages

include/linux/swapops.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,11 +33,13 @@
3333
* can use the extra bits to store other information besides PFN.
3434
*/
3535
#ifdef MAX_PHYSMEM_BITS
36-
#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
36+
#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
3737
#else /* MAX_PHYSMEM_BITS */
38-
#define SWP_PFN_BITS (BITS_PER_LONG - PAGE_SHIFT)
38+
#define SWP_PFN_BITS min_t(int, \
39+
sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
40+
SWP_TYPE_SHIFT)
3941
#endif /* MAX_PHYSMEM_BITS */
40-
#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
42+
#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
4143

4244
/**
4345
* Migration swap entry specific bitfield definitions. Layout:

mm/gup.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2852,7 +2852,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
28522852
next = pud_addr_end(addr, end);
28532853
if (unlikely(!pud_present(pud)))
28542854
return 0;
2855-
if (unlikely(pud_huge(pud))) {
2855+
if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
28562856
if (!gup_huge_pud(pud, pudp, addr, next, flags,
28572857
pages, nr))
28582858
return 0;

mm/mmap.c

Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -226,8 +226,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
226226
/* Search one past newbrk */
227227
mas_set(&mas, newbrk);
228228
brkvma = mas_find(&mas, oldbrk);
229-
BUG_ON(brkvma == NULL);
230-
if (brkvma->vm_start >= oldbrk)
229+
if (!brkvma || brkvma->vm_start >= oldbrk)
231230
goto out; /* mapping intersects with an existing non-brk vma. */
232231
/*
233232
* mm->brk must be protected by write mmap_lock.
@@ -2946,9 +2945,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
29462945
* Expand the existing vma if possible; Note that singular lists do not
29472946
* occur after forking, so the expand will only happen on new VMAs.
29482947
*/
2949-
if (vma &&
2950-
(!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
2951-
((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
2948+
if (vma && vma->vm_end == addr && !vma_policy(vma) &&
2949+
can_vma_merge_after(vma, flags, NULL, NULL,
2950+
addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
29522951
mas_set_range(mas, vma->vm_start, addr + len - 1);
29532952
if (mas_preallocate(mas, vma, GFP_KERNEL))
29542953
return -ENOMEM;
@@ -3035,11 +3034,6 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
30353034
goto munmap_failed;
30363035

30373036
vma = mas_prev(&mas, 0);
3038-
if (!vma || vma->vm_end != addr || vma_policy(vma) ||
3039-
!can_vma_merge_after(vma, flags, NULL, NULL,
3040-
addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
3041-
vma = NULL;
3042-
30433037
ret = do_brk_flags(&mas, vma, addr, len, flags);
30443038
populate = ((mm->def_flags & VM_LOCKED) != 0);
30453039
mmap_write_unlock(mm);

mm/shmem.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -948,6 +948,15 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
948948
index++;
949949
}
950950

951+
/*
952+
* When undoing a failed fallocate, we want none of the partial folio
953+
* zeroing and splitting below, but shall want to truncate the whole
954+
* folio when !uptodate indicates that it was added by this fallocate,
955+
* even when [lstart, lend] covers only a part of the folio.
956+
*/
957+
if (unfalloc)
958+
goto whole_folios;
959+
951960
same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
952961
folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
953962
if (folio) {
@@ -973,6 +982,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
973982
folio_put(folio);
974983
}
975984

985+
whole_folios:
986+
976987
index = start;
977988
while (index < end) {
978989
cond_resched();

tools/testing/selftests/cgroup/test_kmem.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,12 @@
1919

2020

2121
/*
22-
* Memory cgroup charging is performed using percpu batches 32 pages
22+
* Memory cgroup charging is performed using percpu batches 64 pages
2323
* big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
2424
* the maximum discrepancy between charge and vmstat entries is number
25-
* of cpus multiplied by 32 pages.
25+
* of cpus multiplied by 64 pages.
2626
*/
27-
#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
27+
#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())
2828

2929

3030
static int alloc_dcache(const char *cgroup, void *arg)

0 commit comments

Comments (0)