Commit 5cf909c

oglitta authored and tehcaster committed
mm/slub: use stackdepot to save stack trace in objects
Many stack traces are similar, so there are many similar arrays. Stackdepot saves each unique stack only once.

Replace the field addrs in struct track with a depot_stack_handle_t handle, and use stackdepot to save the stack trace.

The benefits are smaller memory overhead, and the possibility to aggregate per-cache statistics in the following patch using the stackdepot handle instead of matching stacks manually.

[ vbabka@suse.cz: rebase to 5.17-rc1 and adjust accordingly ]

This was initially merged as commit 7886914 and reverted by commit ae14c63 due to several issues that should now be fixed. The problem of unconditional memory overhead from stackdepot has been addressed by commit 2dba5eb ("lib/stackdepot: allow optional init and stack_table allocation by kvmalloc()"), so the dependency on stackdepot results in extra memory usage only when slab cache tracking is actually enabled, and not for all CONFIG_SLUB_DEBUG builds. The build failures on some architectures were also addressed, and the reported issue with the xfs/433 test did not reproduce on 5.17-rc1 with this patch.

Signed-off-by: Oliver Glitta <glittao@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-and-tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
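For context, a minimal sketch of the stackdepot round-trip this patch relies on (not part of the patch; the helper names record_caller and print_recorded are hypothetical): stack_trace_save() captures the current stack into a local array, stack_depot_save() deduplicates it into the depot and returns a compact handle, and stack_depot_fetch() later retrieves the stored entries without copying.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/*
 * Hypothetical helpers sketching the round-trip. Identical traces hash
 * to the same handle, so each unique stack is stored in the depot only
 * once, however many objects reference it.
 */
static depot_stack_handle_t record_caller(void)
{
	unsigned long entries[16];
	unsigned int nr;

	/* Capture the current stack, skipping this helper's own frame. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* Deduplicate: an already-seen trace returns its existing handle. */
	return stack_depot_save(entries, nr, GFP_NOWAIT);
}

static void print_recorded(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	if (!handle)
		return;	/* depot allocation failed or nothing was saved */

	/* Fetch returns a pointer into the depot; nothing is copied. */
	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 0);
}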
1 parent 0cd1a02 commit 5cf909c

File tree: init/Kconfig · lib/Kconfig.debug · mm/slab_common.c · mm/slub.c

4 files changed: +47 -31 lines changed


init/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -1875,6 +1875,7 @@ config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EXPERT
 	depends on SLUB && SYSFS
+	select STACKDEPOT if STACKTRACE_SUPPORT
 	help
 	  SLUB has extensive debug support features. Disabling these can
 	  result in significant savings in code size. This also disables
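The select makes the depot code available but, thanks to commit 2dba5eb, costs no memory by itself: the backing hash table is only allocated once stack_depot_init() is called. A rough sketch of that contract (the helper name tracking_prepare is hypothetical):

#include <linux/stackdepot.h>
#include <linux/types.h>

/*
 * Hypothetical helper: the depot's stack_table is allocated on the
 * first stack_depot_init() call; later calls are effectively no-ops,
 * so callers can invoke it whenever tracking is switched on.
 */
static void tracking_prepare(bool store_user)
{
	if (store_user)
		stack_depot_init();
}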

lib/Kconfig.debug

Lines changed: 1 addition & 0 deletions
@@ -709,6 +709,7 @@ config DEBUG_SLAB
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
 	depends on SLUB && SLUB_DEBUG
+	select STACKDEPOT_ALWAYS_INIT if STACKTRACE_SUPPORT
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
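Design note: SLUB_DEBUG_ON turns the default debug flags, which include SLAB_STORE_USER, on from the very first allocations, before any cache-creation path could call stack_depot_init() on demand. Selecting STACKDEPOT_ALWAYS_INIT therefore has the depot allocate its hash table unconditionally during early boot.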

mm/slab_common.c

Lines changed: 5 additions & 0 deletions
@@ -24,6 +24,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/memcontrol.h>
+#include <linux/stackdepot.h>

 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>
@@ -314,9 +315,13 @@ kmem_cache_create_usercopy(const char *name,
 	 * If no slub_debug was enabled globally, the static key is not yet
 	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
+	 * It's also possible that this is the first cache created with
+	 * SLAB_STORE_USER and we should init stack_depot for it.
 	 */
 	if (flags & SLAB_DEBUG_FLAGS)
 		static_branch_enable(&slub_debug_enabled);
+	if (flags & SLAB_STORE_USER)
+		stack_depot_init();
 #endif

 	mutex_lock(&slab_mutex);
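A minimal usage sketch (the cache name, size, and init function are illustrative): the first cache created with SLAB_STORE_USER is what takes the stack_depot_init() branch added above.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Illustrative cache: requesting user tracking at creation time now
 * also initializes stackdepot if no earlier cache needed it. */
static int __init demo_cache_init(void)
{
	struct kmem_cache *demo;

	demo = kmem_cache_create("demo_cache", 64, 0, SLAB_STORE_USER, NULL);
	return demo ? 0 : -ENOMEM;
}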

mm/slub.c

Lines changed: 40 additions & 31 deletions
@@ -26,6 +26,7 @@
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
+#include <linux/stackdepot.h>
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/kfence.h>
@@ -264,8 +265,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define TRACK_ADDRS_COUNT 16
 struct track {
 	unsigned long addr;	/* Called from address */
-#ifdef CONFIG_STACKTRACE
-	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
+#ifdef CONFIG_STACKDEPOT
+	depot_stack_handle_t handle;
 #endif
 	int cpu;	/* Was running on cpu */
 	int pid;	/* Pid context */
@@ -724,22 +725,19 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 	return kasan_reset_tag(p + alloc);
 }

-static void set_track(struct kmem_cache *s, void *object,
+static void noinline set_track(struct kmem_cache *s, void *object,
 			enum track_item alloc, unsigned long addr)
 {
 	struct track *p = get_track(s, object, alloc);

-#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_STACKDEPOT
+	unsigned long entries[TRACK_ADDRS_COUNT];
 	unsigned int nr_entries;

-	metadata_access_enable();
-	nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
-				      TRACK_ADDRS_COUNT, 3);
-	metadata_access_disable();
-
-	if (nr_entries < TRACK_ADDRS_COUNT)
-		p->addrs[nr_entries] = 0;
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
+	p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 #endif
+
 	p->addr = addr;
 	p->cpu = smp_processor_id();
 	p->pid = current->pid;
@@ -759,20 +757,19 @@ static void init_tracking(struct kmem_cache *s, void *object)

 static void print_track(const char *s, struct track *t, unsigned long pr_time)
 {
+	depot_stack_handle_t handle __maybe_unused;
+
 	if (!t->addr)
 		return;

 	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
 	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
-#ifdef CONFIG_STACKTRACE
-	{
-		int i;
-		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
-			if (t->addrs[i])
-				pr_err("\t%pS\n", (void *)t->addrs[i]);
-			else
-				break;
-	}
+#ifdef CONFIG_STACKDEPOT
+	handle = READ_ONCE(t->handle);
+	if (handle)
+		stack_depot_print(handle);
+	else
+		pr_err("object allocation/free stack trace missing\n");
 #endif
 }

@@ -1532,6 +1529,8 @@ static int __init setup_slub_debug(char *str)
 			global_slub_debug_changed = true;
 		} else {
 			slab_list_specified = true;
+			if (flags & SLAB_STORE_USER)
+				stack_depot_want_early_init();
 		}
 	}

@@ -1549,6 +1548,8 @@ static int __init setup_slub_debug(char *str)
 	}
 out:
 	slub_debug = global_flags;
+	if (slub_debug & SLAB_STORE_USER)
+		stack_depot_want_early_init();
 	if (slub_debug != 0 || slub_debug_string)
 		static_branch_enable(&slub_debug_enabled);
 	else
@@ -4342,18 +4343,26 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 	objp = fixup_red_left(s, objp);
 	trackp = get_track(s, objp, TRACK_ALLOC);
 	kpp->kp_ret = (void *)trackp->addr;
-#ifdef CONFIG_STACKTRACE
-	for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
-		kpp->kp_stack[i] = (void *)trackp->addrs[i];
-		if (!kpp->kp_stack[i])
-			break;
-	}
+#ifdef CONFIG_STACKDEPOT
+	{
+		depot_stack_handle_t handle;
+		unsigned long *entries;
+		unsigned int nr_entries;
+
+		handle = READ_ONCE(trackp->handle);
+		if (handle) {
+			nr_entries = stack_depot_fetch(handle, &entries);
+			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
+				kpp->kp_stack[i] = (void *)entries[i];
+		}

-	trackp = get_track(s, objp, TRACK_FREE);
-	for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
-		kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
-		if (!kpp->kp_free_stack[i])
-			break;
+		trackp = get_track(s, objp, TRACK_FREE);
+		handle = READ_ONCE(trackp->handle);
+		if (handle) {
+			nr_entries = stack_depot_fetch(handle, &entries);
+			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
+				kpp->kp_free_stack[i] = (void *)entries[i];
+		}
 	}
 #endif
 #endif
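The same tracking can also be requested at boot: a 'U' in a slub_debug string sets SLAB_STORE_USER, and the setup_slub_debug() hunks above then call stack_depot_want_early_init() so the depot is ready before the first tracked allocation. An illustrative command line (the cache list is an example):

	slub_debug=U,kmalloc-64

With tracking active, print_track() now resolves the stored handle through stack_depot_print() rather than walking a fixed per-object array of return addresses.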
