Skip to content

Commit 281b9d9

Browse files
committed
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton: "13 patches. Subsystems affected by this patch series: mm (memory-failure, memcg, userfaultfd, hugetlbfs, mremap, oom-kill, kasan, hmm), and kcov" * emailed patches from Andrew Morton <akpm@linux-foundation.org>: mm/mmu_notifier.c: fix race in mmu_interval_notifier_remove() kcov: don't generate a warning on vm_insert_page()'s failure MAINTAINERS: add Vincenzo Frascino to KASAN reviewers oom_kill.c: futex: delay the OOM reaper to allow time for proper futex cleanup selftest/vm: add skip support to mremap_test selftest/vm: support xfail in mremap_test selftest/vm: verify remap destination address in mremap_test selftest/vm: verify mmap addr in mremap_test mm, hugetlb: allow for "high" userspace addresses userfaultfd: mark uffd_wp regardless of VM_WRITE flag memcg: sync flush only if periodic flush is delayed mm/memory-failure.c: skip huge_zero_page in memory_failure() mm/hwpoison: fix race between hugetlb free/demotion and memory_failure_hugetlb()
2 parents 3b8000a + 3195616 commit 281b9d9

File tree

18 files changed

+327
-87
lines changed

18 files changed

+327
-87
lines changed

MAINTAINERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10547,6 +10547,7 @@ M: Andrey Ryabinin <ryabinin.a.a@gmail.com>
1054710547
R: Alexander Potapenko <glider@google.com>
1054810548
R: Andrey Konovalov <andreyknvl@gmail.com>
1054910549
R: Dmitry Vyukov <dvyukov@google.com>
10550+
R: Vincenzo Frascino <vincenzo.frascino@arm.com>
1055010551
L: kasan-dev@googlegroups.com
1055110552
S: Maintained
1055210553
F: Documentation/dev-tools/kasan.rst

fs/hugetlbfs/inode.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
206206
info.flags = 0;
207207
info.length = len;
208208
info.low_limit = current->mm->mmap_base;
209-
info.high_limit = TASK_SIZE;
209+
info.high_limit = arch_get_mmap_end(addr);
210210
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
211211
info.align_offset = 0;
212212
return vm_unmapped_area(&info);
@@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
222222
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
223223
info.length = len;
224224
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
225-
info.high_limit = current->mm->mmap_base;
225+
info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
226226
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
227227
info.align_offset = 0;
228228
addr = vm_unmapped_area(&info);
@@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
237237
VM_BUG_ON(addr != -ENOMEM);
238238
info.flags = 0;
239239
info.low_limit = current->mm->mmap_base;
240-
info.high_limit = TASK_SIZE;
240+
info.high_limit = arch_get_mmap_end(addr);
241241
addr = vm_unmapped_area(&info);
242242
}
243243

@@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
251251
struct mm_struct *mm = current->mm;
252252
struct vm_area_struct *vma;
253253
struct hstate *h = hstate_file(file);
254+
const unsigned long mmap_end = arch_get_mmap_end(addr);
254255

255256
if (len & ~huge_page_mask(h))
256257
return -EINVAL;
@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
266267
if (addr) {
267268
addr = ALIGN(addr, huge_page_size(h));
268269
vma = find_vma(mm, addr);
269-
if (TASK_SIZE - len >= addr &&
270+
if (mmap_end - len >= addr &&
270271
(!vma || addr + len <= vm_start_gap(vma)))
271272
return addr;
272273
}

include/linux/hugetlb.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -169,6 +169,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
169169
long freed);
170170
bool isolate_huge_page(struct page *page, struct list_head *list);
171171
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
172+
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
172173
void putback_active_hugepage(struct page *page);
173174
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
174175
void free_huge_page(struct page *page);
@@ -378,6 +379,11 @@ static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
378379
return 0;
379380
}
380381

382+
static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
383+
{
384+
return 0;
385+
}
386+
381387
static inline void putback_active_hugepage(struct page *page)
382388
{
383389
}

include/linux/memcontrol.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1012,6 +1012,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
10121012
}
10131013

10141014
void mem_cgroup_flush_stats(void);
1015+
void mem_cgroup_flush_stats_delayed(void);
10151016

10161017
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
10171018
int val);
@@ -1455,6 +1456,10 @@ static inline void mem_cgroup_flush_stats(void)
14551456
{
14561457
}
14571458

1459+
static inline void mem_cgroup_flush_stats_delayed(void)
1460+
{
1461+
}
1462+
14581463
static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
14591464
enum node_stat_item idx, int val)
14601465
{

include/linux/mm.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3197,6 +3197,14 @@ extern int sysctl_memory_failure_recovery;
31973197
extern void shake_page(struct page *p);
31983198
extern atomic_long_t num_poisoned_pages __read_mostly;
31993199
extern int soft_offline_page(unsigned long pfn, int flags);
3200+
#ifdef CONFIG_MEMORY_FAILURE
3201+
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags);
3202+
#else
3203+
static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
3204+
{
3205+
return 0;
3206+
}
3207+
#endif
32003208

32013209
#ifndef arch_memory_failure
32023210
static inline int arch_memory_failure(unsigned long pfn, int flags)

include/linux/sched.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1443,6 +1443,7 @@ struct task_struct {
14431443
int pagefault_disabled;
14441444
#ifdef CONFIG_MMU
14451445
struct task_struct *oom_reaper_list;
1446+
struct timer_list oom_reaper_timer;
14461447
#endif
14471448
#ifdef CONFIG_VMAP_STACK
14481449
struct vm_struct *stack_vm_area;

include/linux/sched/mm.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -136,6 +136,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
136136
#endif /* CONFIG_MEMCG */
137137

138138
#ifdef CONFIG_MMU
139+
#ifndef arch_get_mmap_end
140+
#define arch_get_mmap_end(addr) (TASK_SIZE)
141+
#endif
142+
143+
#ifndef arch_get_mmap_base
144+
#define arch_get_mmap_base(addr, base) (base)
145+
#endif
146+
139147
extern void arch_pick_mmap_layout(struct mm_struct *mm,
140148
struct rlimit *rlim_stack);
141149
extern unsigned long

kernel/kcov.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -475,8 +475,11 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
475475
vma->vm_flags |= VM_DONTEXPAND;
476476
for (off = 0; off < size; off += PAGE_SIZE) {
477477
page = vmalloc_to_page(kcov->area + off);
478-
if (vm_insert_page(vma, vma->vm_start + off, page))
479-
WARN_ONCE(1, "vm_insert_page() failed");
478+
res = vm_insert_page(vma, vma->vm_start + off, page);
479+
if (res) {
480+
pr_warn_once("kcov: vm_insert_page() failed\n");
481+
return res;
482+
}
480483
}
481484
return 0;
482485
exit:

mm/hugetlb.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6785,6 +6785,16 @@ int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
67856785
return ret;
67866786
}
67876787

6788+
int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
6789+
{
6790+
int ret;
6791+
6792+
spin_lock_irq(&hugetlb_lock);
6793+
ret = __get_huge_page_for_hwpoison(pfn, flags);
6794+
spin_unlock_irq(&hugetlb_lock);
6795+
return ret;
6796+
}
6797+
67886798
void putback_active_hugepage(struct page *page)
67896799
{
67906800
spin_lock_irq(&hugetlb_lock);

mm/memcontrol.c

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -587,6 +587,9 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
587587
static DEFINE_SPINLOCK(stats_flush_lock);
588588
static DEFINE_PER_CPU(unsigned int, stats_updates);
589589
static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
590+
static u64 flush_next_time;
591+
592+
#define FLUSH_TIME (2UL*HZ)
590593

591594
/*
592595
* Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
@@ -637,6 +640,7 @@ static void __mem_cgroup_flush_stats(void)
637640
if (!spin_trylock_irqsave(&stats_flush_lock, flag))
638641
return;
639642

643+
flush_next_time = jiffies_64 + 2*FLUSH_TIME;
640644
cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
641645
atomic_set(&stats_flush_threshold, 0);
642646
spin_unlock_irqrestore(&stats_flush_lock, flag);
@@ -648,10 +652,16 @@ void mem_cgroup_flush_stats(void)
648652
__mem_cgroup_flush_stats();
649653
}
650654

655+
void mem_cgroup_flush_stats_delayed(void)
656+
{
657+
if (time_after64(jiffies_64, flush_next_time))
658+
mem_cgroup_flush_stats();
659+
}
660+
651661
static void flush_memcg_stats_dwork(struct work_struct *w)
652662
{
653663
__mem_cgroup_flush_stats();
654-
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
664+
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
655665
}
656666

657667
/**

0 commit comments

Comments (0)