Commit 5eea582

Merge tag 'mm-stable-2023-09-04-14-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more MM updates from Andrew Morton:

 - Stefan Roesch has added ksm statistics to /proc/pid/smaps

 - Also a number of singleton patches, mainly cleanups and leftovers

* tag 'mm-stable-2023-09-04-14-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/kmemleak: move up cond_resched() call in page scanning loop
  mm: page_alloc: remove stale CMA guard code
  MAINTAINERS: add rmap.h to mm entry
  rmap: remove anon_vma_link() nommu stub
  proc/ksm: add ksm stats to /proc/pid/smaps
  mm/hwpoison: rename hwp_walk* to hwpoison_walk*
  mm: memory-failure: add PageOffline() check

2 parents 893a259 + e68d343

7 files changed, 29 insertions(+), 30 deletions(-)

Documentation/filesystems/proc.rst: 4 additions, 0 deletions

@@ -461,6 +461,7 @@ Memory Area, or VMA) there is a series of lines such as the following::
     Private_Dirty:         0 kB
     Referenced:          892 kB
     Anonymous:             0 kB
+    KSM:                   0 kB
     LazyFree:              0 kB
     AnonHugePages:         0 kB
     ShmemPmdMapped:        0 kB
@@ -501,6 +502,9 @@ accessed.
 a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
 and a page is modified, the file page is replaced by a private anonymous copy.
 
+"KSM" reports how many of the pages are KSM pages. Note that KSM-placed zeropages
+are not included, only actual KSM pages.
+
 "LazyFree" shows the amount of memory which is marked by madvise(MADV_FREE).
 The memory isn't freed immediately with madvise(). It's freed in memory
 pressure if the memory is clean. Please note that the printed value might
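
The new "KSM" field is straightforward to check from userspace. As a minimal
sketch (my own illustration, not part of this series), the following C program
sums the per-VMA "KSM:" lines from /proc/<pid>/smaps:

    /* ksm_total.c: hypothetical demo, not part of the patch. Sums the
     * per-VMA "KSM:" lines that this series adds to /proc/<pid>/smaps. */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
            char path[64], line[256];
            unsigned long kb, total_kb = 0;
            FILE *f;

            /* Default to the calling process if no pid is given. */
            snprintf(path, sizeof(path), "/proc/%s/smaps",
                     argc > 1 ? argv[1] : "self");
            f = fopen(path, "r");
            if (!f) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    if (sscanf(line, "KSM: %lu kB", &kb) == 1)
                            total_kb += kb;
            fclose(f);
            printf("KSM-backed memory: %lu kB\n", total_kb);
            return 0;
    }

On kernels without this patch the program simply prints 0 kB, since no "KSM:"
lines are present to match.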

MAINTAINERS: 1 addition, 0 deletions

@@ -13742,6 +13742,7 @@ F: include/linux/memory_hotplug.h
 F:	include/linux/mm.h
 F:	include/linux/mmzone.h
 F:	include/linux/pagewalk.h
+F:	include/linux/rmap.h
 F:	include/trace/events/ksm.h
 F:	mm/
 F:	tools/mm/

fs/proc/task_mmu.c: 6 additions, 0 deletions

@@ -4,6 +4,7 @@
 #include <linux/hugetlb.h>
 #include <linux/huge_mm.h>
 #include <linux/mount.h>
+#include <linux/ksm.h>
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
@@ -396,6 +397,7 @@ struct mem_size_stats {
 	unsigned long swap;
 	unsigned long shared_hugetlb;
 	unsigned long private_hugetlb;
+	unsigned long ksm;
 	u64 pss;
 	u64 pss_anon;
 	u64 pss_file;
@@ -452,6 +454,9 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 		mss->lazyfree += size;
 	}
 
+	if (PageKsm(page))
+		mss->ksm += size;
+
 	mss->resident += size;
 	/* Accumulate the size in pages that have been accessed. */
 	if (young || page_is_young(page) || PageReferenced(page))
@@ -825,6 +830,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 	SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty);
 	SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced);
 	SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous);
+	SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm);
 	SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
 	SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
 	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
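
For context, PageKsm() can only be true for pages in ranges a task has opted
into merging, so the new counter stays at zero unless something has called
madvise(MADV_MERGEABLE). A small way to exercise it end to end (my own demo,
not part of the patch; assumes CONFIG_KSM=y and ksmd enabled, e.g.
echo 1 > /sys/kernel/mm/ksm/run):

    /* ksm_demo.c: hypothetical demo, not part of the patch. Creates
     * identical anonymous pages and opts them into KSM so the new
     * "KSM:" smaps field has something to count once ksmd has scanned. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 64 * 4096;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            memset(p, 0x5a, len);                /* identical page contents */
            if (madvise(p, len, MADV_MERGEABLE)) /* opt the range into KSM */
                    perror("madvise");
            printf("watch 'KSM:' in /proc/%d/smaps\n", getpid());
            pause();                             /* keep the mapping alive */
            return 0;
    }

Since smaps_account() above adds the mapped size per page, each KSM page
mapped into the VMA contributes PAGE_SIZE to the "KSM:" total.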

include/linux/rmap.h: 0 additions, 1 deletion

@@ -479,7 +479,6 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
 
 #define anon_vma_init() do {} while (0)
 #define anon_vma_prepare(vma) (0)
-#define anon_vma_link(vma) do {} while (0)
 
 static inline int folio_referenced(struct folio *folio, int is_locked,
 				   struct mem_cgroup *memcg,

mm/kmemleak.c: 3 additions, 2 deletions

@@ -1584,6 +1584,9 @@ static void kmemleak_scan(void)
 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 			struct page *page = pfn_to_online_page(pfn);
 
+			if (!(pfn & 63))
+				cond_resched();
+
 			if (!page)
 				continue;
 
@@ -1594,8 +1597,6 @@ static void kmemleak_scan(void)
 			if (page_count(page) == 0)
 				continue;
 			scan_block(page, page + 1, NULL);
-			if (!(pfn & 63))
-				cond_resched();
 		}
 	}
 	put_online_mems();
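
The reason for the move: every continue branch in this loop previously jumped
past the cond_resched() at the bottom, so a long stretch of pfns with no
scannable page never yielded the CPU. Checking at the top of the loop body
closes that gap. A runnable userspace analogue of the corrected pattern
(sched_yield() standing in for cond_resched(); the predicate is a stand-in
for the pfn_to_online_page()/page_count() filters):

    /* yield_loop.c: userspace analogue, not kernel code. */
    #include <sched.h>
    #include <stdio.h>

    static int interesting(unsigned long i)
    {
            return (i % 7) == 0; /* stand-in for the page filters above */
    }

    int main(void)
    {
            unsigned long i, processed = 0;

            for (i = 0; i < 1000000; i++) {
                    if (!(i & 63))         /* every 64 iterations... */
                            sched_yield(); /* ...yield even when we skip below */
                    if (!interesting(i))
                            continue;      /* skipped items no longer bypass the yield */
                    processed++;           /* stand-in for scan_block() */
            }
            printf("processed %lu items\n", processed);
            return 0;
    }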

mm/memory-failure.c: 11 additions, 10 deletions

@@ -717,7 +717,7 @@ static void collect_procs(struct page *page, struct list_head *tokill,
 		collect_procs_file(page, tokill, force_early);
 }
 
-struct hwp_walk {
+struct hwpoison_walk {
 	struct to_kill tk;
 	unsigned long pfn;
 	int flags;
@@ -752,7 +752,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
-				      struct hwp_walk *hwp)
+				      struct hwpoison_walk *hwp)
 {
 	pmd_t pmd = *pmdp;
 	unsigned long pfn;
@@ -770,7 +770,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
 }
 #else
 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
-				      struct hwp_walk *hwp)
+				      struct hwpoison_walk *hwp)
 {
 	return 0;
 }
@@ -779,7 +779,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
 			      unsigned long end, struct mm_walk *walk)
 {
-	struct hwp_walk *hwp = walk->private;
+	struct hwpoison_walk *hwp = walk->private;
 	int ret = 0;
 	pte_t *ptep, *mapped_pte;
 	spinlock_t *ptl;
@@ -813,7 +813,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
 			    unsigned long addr, unsigned long end,
 			    struct mm_walk *walk)
 {
-	struct hwp_walk *hwp = walk->private;
+	struct hwpoison_walk *hwp = walk->private;
 	pte_t pte = huge_ptep_get(ptep);
 	struct hstate *h = hstate_vma(walk->vma);
 
@@ -824,7 +824,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
 #define hwpoison_hugetlb_range NULL
 #endif
 
-static const struct mm_walk_ops hwp_walk_ops = {
+static const struct mm_walk_ops hwpoison_walk_ops = {
 	.pmd_entry = hwpoison_pte_range,
 	.hugetlb_entry = hwpoison_hugetlb_range,
 	.walk_lock = PGWALK_RDLOCK,
@@ -847,7 +847,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
 				  int flags)
 {
 	int ret;
-	struct hwp_walk priv = {
+	struct hwpoison_walk priv = {
 		.pfn = pfn,
 	};
 	priv.tk.tsk = p;
@@ -856,7 +856,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
 		return -EFAULT;
 
 	mmap_read_lock(p->mm);
-	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
+	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
 			      (void *)&priv);
 	if (ret == 1 && priv.tk.addr)
 		kill_proc(&priv.tk, pfn, flags);
@@ -1562,7 +1562,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * Here we are interested only in user-mapped pages, so skip any
 	 * other types of pages.
 	 */
-	if (PageReserved(p) || PageSlab(p) || PageTable(p))
+	if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
 		return true;
 	if (!(PageLRU(hpage) || PageHuge(p)))
 		return true;
@@ -2533,7 +2533,8 @@ int unpoison_memory(unsigned long pfn)
 		goto unlock_mutex;
 	}
 
-	if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio))
+	if (folio_test_slab(folio) || PageTable(&folio->page) ||
+	    folio_test_reserved(folio) || PageOffline(&folio->page))
 		goto unlock_mutex;
 
 	/*

mm/page_alloc.c: 4 additions, 17 deletions

@@ -2641,12 +2641,6 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 	do {
 		page = NULL;
 		spin_lock_irqsave(&zone->lock, flags);
-		/*
-		 * order-0 request can reach here when the pcplist is skipped
-		 * due to non-CMA allocation context. HIGHATOMIC area is
-		 * reserved for high-order atomic allocation, so order-0
-		 * request should skip it.
-		 */
 		if (alloc_flags & ALLOC_HIGHATOMIC)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
 		if (!page) {
@@ -2780,17 +2774,10 @@ struct page *rmqueue(struct zone *preferred_zone,
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 
 	if (likely(pcp_allowed_order(order))) {
-		/*
-		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
-		 * we need to skip it when CMA area isn't allowed.
-		 */
-		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
-		    migratetype != MIGRATE_MOVABLE) {
-			page = rmqueue_pcplist(preferred_zone, zone, order,
-					migratetype, alloc_flags);
-			if (likely(page))
-				goto out;
-		}
+		page = rmqueue_pcplist(preferred_zone, zone, order,
+				migratetype, alloc_flags);
+		if (likely(page))
+			goto out;
 	}
 
 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
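
The deleted guard dates from when CMA pages could sit on the MIGRATE_MOVABLE
per-CPU lists, forcing non-CMA allocations to bypass the pcplist fast path;
with CMA pages now kept on their own pcplists, the fast path is always safe
to try. As a toy model of the resulting flow (stand-in names and stubs, not
kernel symbols):

    /* rmqueue_model.c: toy model of the simplified fast path/fallback. */
    #include <stdio.h>
    #include <stddef.h>

    struct page { int id; };

    static struct page pcp_page = { 1 }, buddy_page = { 2 };
    static int pcp_empty; /* flip to 1 to exercise the fallback */

    static struct page *pcplist_stub(void) /* per-CPU fast path */
    {
            return pcp_empty ? NULL : &pcp_page;
    }

    static struct page *buddy_stub(void) /* slow path, takes zone->lock */
    {
            return &buddy_page;
    }

    static struct page *rmqueue_model(void)
    {
            /* Fast path is no longer gated by a CONFIG_CMA/ALLOC_CMA check. */
            struct page *page = pcplist_stub();

            if (page)
                    return page;
            return buddy_stub();
    }

    int main(void)
    {
            printf("got page %d\n", rmqueue_model()->id); /* pcp hit */
            pcp_empty = 1;
            printf("got page %d\n", rmqueue_model()->id); /* buddy fallback */
            return 0;
    }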
