Skip to content

Commit 3c5c9b7

Browse files
committed
Merge tag 'mm-hotfixes-stable-2023-09-05-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Seven hotfixes. Four are cc:stable and the remainder pertain to
  issues which were introduced in the current merge window"

* tag 'mm-hotfixes-stable-2023-09-05-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  sparc64: add missing initialization of folio in tlb_batch_add()
  mm: memory-failure: use rcu lock instead of tasklist_lock when collect_procs()
  revert "memfd: improve userspace warnings for missing exec-related flags".
  rcu: dump vmalloc memory info safely
  mm/vmalloc: add a safer version of find_vm_area() for debug
  tools/mm: fix undefined reference to pthread_once
  memcontrol: ensure memcg acquired by id is properly set up
2 parents 6155a3b + f4b4f3e commit 3c5c9b7

File tree

9 files changed

+56
-26
lines changed

9 files changed

+56
-26
lines changed

arch/sparc/mm/tlb.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,6 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
128128
goto no_cache_flush;
129129

130130
/* A real file page? */
131+
folio = page_folio(page);
131132
mapping = folio_flush_mapping(folio);
132133
if (!mapping)
133134
goto no_cache_flush;

mm/filemap.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -121,9 +121,6 @@
121121
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
122122
* ->inode->i_lock (zap_pte_range->set_page_dirty)
123123
* ->private_lock (zap_pte_range->block_dirty_folio)
124-
*
125-
* ->i_mmap_rwsem
126-
* ->tasklist_lock (memory_failure, collect_procs_ao)
127124
*/
128125

129126
static void page_cache_delete(struct address_space *mapping,

mm/ksm.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2925,7 +2925,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
29252925
struct anon_vma *av = rmap_item->anon_vma;
29262926

29272927
anon_vma_lock_read(av);
2928-
read_lock(&tasklist_lock);
2928+
rcu_read_lock();
29292929
for_each_process(tsk) {
29302930
struct anon_vma_chain *vmac;
29312931
unsigned long addr;
@@ -2944,7 +2944,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
29442944
}
29452945
}
29462946
}
2947-
read_unlock(&tasklist_lock);
2947+
rcu_read_unlock();
29482948
anon_vma_unlock_read(av);
29492949
}
29502950
}

mm/memcontrol.c

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5326,7 +5326,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
53265326
INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
53275327
memcg->deferred_split_queue.split_queue_len = 0;
53285328
#endif
5329-
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
53305329
lru_gen_init_memcg(memcg);
53315330
return memcg;
53325331
fail:
@@ -5398,14 +5397,27 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
53985397
if (alloc_shrinker_info(memcg))
53995398
goto offline_kmem;
54005399

5401-
/* Online state pins memcg ID, memcg ID pins CSS */
5402-
refcount_set(&memcg->id.ref, 1);
5403-
css_get(css);
5404-
54055400
if (unlikely(mem_cgroup_is_root(memcg)))
54065401
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
54075402
FLUSH_TIME);
54085403
lru_gen_online_memcg(memcg);
5404+
5405+
/* Online state pins memcg ID, memcg ID pins CSS */
5406+
refcount_set(&memcg->id.ref, 1);
5407+
css_get(css);
5408+
5409+
/*
5410+
* Ensure mem_cgroup_from_id() works once we're fully online.
5411+
*
5412+
* We could do this earlier and require callers to filter with
5413+
* css_tryget_online(). But right now there are no users that
5414+
* need earlier access, and the workingset code relies on the
5415+
* cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5416+
* publish it here at the end of onlining. This matches the
5417+
* regular ID destruction during offlining.
5418+
*/
5419+
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5420+
54095421
return 0;
54105422
offline_kmem:
54115423
memcg_offline_kmem(memcg);

mm/memfd.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -316,7 +316,7 @@ SYSCALL_DEFINE2(memfd_create,
316316
return -EINVAL;
317317

318318
if (!(flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
319-
pr_info_ratelimited(
319+
pr_warn_once(
320320
"%s[%d]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set\n",
321321
current->comm, task_pid_nr(current));
322322
}

mm/memory-failure.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -547,8 +547,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
547547
* on behalf of the thread group. Return task_struct of the (first found)
548548
* dedicated thread if found, and return NULL otherwise.
549549
*
550-
* We already hold read_lock(&tasklist_lock) in the caller, so we don't
551-
* have to call rcu_read_lock/unlock() in this function.
550+
* We already hold rcu lock in the caller, so we don't have to call
551+
* rcu_read_lock/unlock() in this function.
552552
*/
553553
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
554554
{
@@ -609,7 +609,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
609609
return;
610610

611611
pgoff = page_to_pgoff(page);
612-
read_lock(&tasklist_lock);
612+
rcu_read_lock();
613613
for_each_process(tsk) {
614614
struct anon_vma_chain *vmac;
615615
struct task_struct *t = task_early_kill(tsk, force_early);
@@ -626,7 +626,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
626626
add_to_kill_anon_file(t, page, vma, to_kill);
627627
}
628628
}
629-
read_unlock(&tasklist_lock);
629+
rcu_read_unlock();
630630
anon_vma_unlock_read(av);
631631
}
632632

@@ -642,7 +642,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
642642
pgoff_t pgoff;
643643

644644
i_mmap_lock_read(mapping);
645-
read_lock(&tasklist_lock);
645+
rcu_read_lock();
646646
pgoff = page_to_pgoff(page);
647647
for_each_process(tsk) {
648648
struct task_struct *t = task_early_kill(tsk, force_early);
@@ -662,7 +662,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
662662
add_to_kill_anon_file(t, page, vma, to_kill);
663663
}
664664
}
665-
read_unlock(&tasklist_lock);
665+
rcu_read_unlock();
666666
i_mmap_unlock_read(mapping);
667667
}
668668

@@ -685,7 +685,7 @@ static void collect_procs_fsdax(struct page *page,
685685
struct task_struct *tsk;
686686

687687
i_mmap_lock_read(mapping);
688-
read_lock(&tasklist_lock);
688+
rcu_read_lock();
689689
for_each_process(tsk) {
690690
struct task_struct *t = task_early_kill(tsk, true);
691691

@@ -696,7 +696,7 @@ static void collect_procs_fsdax(struct page *page,
696696
add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
697697
}
698698
}
699-
read_unlock(&tasklist_lock);
699+
rcu_read_unlock();
700700
i_mmap_unlock_read(mapping);
701701
}
702702
#endif /* CONFIG_FS_DAX */

mm/util.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1068,7 +1068,9 @@ void mem_dump_obj(void *object)
10681068
if (vmalloc_dump_obj(object))
10691069
return;
10701070

1071-
if (virt_addr_valid(object))
1071+
if (is_vmalloc_addr(object))
1072+
type = "vmalloc memory";
1073+
else if (virt_addr_valid(object))
10721074
type = "non-slab/vmalloc memory";
10731075
else if (object == NULL)
10741076
type = "NULL pointer";

mm/vmalloc.c

Lines changed: 22 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4278,14 +4278,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
42784278
#ifdef CONFIG_PRINTK
42794279
bool vmalloc_dump_obj(void *object)
42804280
{
4281-
struct vm_struct *vm;
42824281
void *objp = (void *)PAGE_ALIGN((unsigned long)object);
4282+
const void *caller;
4283+
struct vm_struct *vm;
4284+
struct vmap_area *va;
4285+
unsigned long addr;
4286+
unsigned int nr_pages;
42834287

4284-
vm = find_vm_area(objp);
4285-
if (!vm)
4288+
if (!spin_trylock(&vmap_area_lock))
4289+
return false;
4290+
va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
4291+
if (!va) {
4292+
spin_unlock(&vmap_area_lock);
42864293
return false;
4294+
}
4295+
4296+
vm = va->vm;
4297+
if (!vm) {
4298+
spin_unlock(&vmap_area_lock);
4299+
return false;
4300+
}
4301+
addr = (unsigned long)vm->addr;
4302+
caller = vm->caller;
4303+
nr_pages = vm->nr_pages;
4304+
spin_unlock(&vmap_area_lock);
42874305
pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4288-
vm->nr_pages, (unsigned long)vm->addr, vm->caller);
4306+
nr_pages, addr, caller);
42894307
return true;
42904308
}
42914309
#endif

tools/mm/Makefile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,8 @@ TARGETS=page-types slabinfo page_owner_sort
88
LIB_DIR = ../lib/api
99
LIBS = $(LIB_DIR)/libapi.a
1010

11-
CFLAGS += -Wall -Wextra -I../lib/
12-
LDFLAGS += $(LIBS)
11+
CFLAGS += -Wall -Wextra -I../lib/ -pthread
12+
LDFLAGS += $(LIBS) -pthread
1313

1414
all: $(TARGETS)
1515

Comments (0)