Skip to content

Commit b347aa7

Browse files
Vasily Averin authored and tehcaster committed
mm/tracing: add 'accounted' entry into output of allocation tracepoints
Slab caches marked with SLAB_ACCOUNT force accounting for every allocation from this cache even if __GFP_ACCOUNT flag is not passed. Unfortunately, at the moment this flag is not visible in ftrace output, and this makes it difficult to analyze the accounted allocations. This patch adds boolean "accounted" entry into trace output, and set it to 'true' for calls used __GFP_ACCOUNT flag and for allocations from caches marked with SLAB_ACCOUNT. Set it to 'false' if accounting is disabled in configs. Signed-off-by: Vasily Averin <vvs@openvz.org> Acked-by: Shakeel Butt <shakeelb@google.com> Acked-by: Roman Gushchin <roman.gushchin@linux.dev> Acked-by: Muchun Song <songmuchun@bytedance.com> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Link: https://lore.kernel.org/r/c418ed25-65fe-f623-fbf8-1676528859ed@openvz.org Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 0c7e0d6 commit b347aa7

File tree

5 files changed

+49
-38
lines changed

5 files changed

+49
-38
lines changed

include/trace/events/kmem.h

Lines changed: 26 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -13,18 +13,20 @@ DECLARE_EVENT_CLASS(kmem_alloc,
1313

1414
TP_PROTO(unsigned long call_site,
1515
const void *ptr,
16+
struct kmem_cache *s,
1617
size_t bytes_req,
1718
size_t bytes_alloc,
1819
gfp_t gfp_flags),
1920

20-
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
21+
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
2122

2223
TP_STRUCT__entry(
2324
__field( unsigned long, call_site )
2425
__field( const void *, ptr )
2526
__field( size_t, bytes_req )
2627
__field( size_t, bytes_alloc )
2728
__field( unsigned long, gfp_flags )
29+
__field( bool, accounted )
2830
),
2931

3032
TP_fast_assign(
@@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc,
3335
__entry->bytes_req = bytes_req;
3436
__entry->bytes_alloc = bytes_alloc;
3537
__entry->gfp_flags = (__force unsigned long)gfp_flags;
38+
__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
39+
((gfp_flags & __GFP_ACCOUNT) ||
40+
(s && s->flags & SLAB_ACCOUNT)) : false;
3641
),
3742

38-
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
43+
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
3944
(void *)__entry->call_site,
4045
__entry->ptr,
4146
__entry->bytes_req,
4247
__entry->bytes_alloc,
43-
show_gfp_flags(__entry->gfp_flags))
48+
show_gfp_flags(__entry->gfp_flags),
49+
__entry->accounted ? "true" : "false")
4450
);
4551

4652
DEFINE_EVENT(kmem_alloc, kmalloc,
4753

48-
TP_PROTO(unsigned long call_site, const void *ptr,
54+
TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
4955
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
5056

51-
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
57+
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
5258
);
5359

5460
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
5561

56-
TP_PROTO(unsigned long call_site, const void *ptr,
62+
TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
5763
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
5864

59-
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
65+
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
6066
);
6167

6268
DECLARE_EVENT_CLASS(kmem_alloc_node,
6369

6470
TP_PROTO(unsigned long call_site,
6571
const void *ptr,
72+
struct kmem_cache *s,
6673
size_t bytes_req,
6774
size_t bytes_alloc,
6875
gfp_t gfp_flags,
6976
int node),
7077

71-
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
78+
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
7279

7380
TP_STRUCT__entry(
7481
__field( unsigned long, call_site )
@@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
7784
__field( size_t, bytes_alloc )
7885
__field( unsigned long, gfp_flags )
7986
__field( int, node )
87+
__field( bool, accounted )
8088
),
8189

8290
TP_fast_assign(
@@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
8694
__entry->bytes_alloc = bytes_alloc;
8795
__entry->gfp_flags = (__force unsigned long)gfp_flags;
8896
__entry->node = node;
97+
__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
98+
((gfp_flags & __GFP_ACCOUNT) ||
99+
(s && s->flags & SLAB_ACCOUNT)) : false;
89100
),
90101

91-
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
102+
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
92103
(void *)__entry->call_site,
93104
__entry->ptr,
94105
__entry->bytes_req,
95106
__entry->bytes_alloc,
96107
show_gfp_flags(__entry->gfp_flags),
97-
__entry->node)
108+
__entry->node,
109+
__entry->accounted ? "true" : "false")
98110
);
99111

100112
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
101113

102114
TP_PROTO(unsigned long call_site, const void *ptr,
103-
size_t bytes_req, size_t bytes_alloc,
115+
struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
104116
gfp_t gfp_flags, int node),
105117

106-
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
118+
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
107119
);
108120

109121
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
110122

111123
TP_PROTO(unsigned long call_site, const void *ptr,
112-
size_t bytes_req, size_t bytes_alloc,
124+
struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
113125
gfp_t gfp_flags, int node),
114126

115-
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
127+
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
116128
);
117129

118130
TRACE_EVENT(kfree,

mm/slab.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3478,7 +3478,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
34783478
{
34793479
void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
34803480

3481-
trace_kmem_cache_alloc(_RET_IP_, ret,
3481+
trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
34823482
cachep->object_size, cachep->size, flags);
34833483

34843484
return ret;
@@ -3567,7 +3567,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
35673567
ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
35683568

35693569
ret = kasan_kmalloc(cachep, ret, size, flags);
3570-
trace_kmalloc(_RET_IP_, ret,
3570+
trace_kmalloc(_RET_IP_, ret, cachep,
35713571
size, cachep->size, flags);
35723572
return ret;
35733573
}
@@ -3592,7 +3592,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
35923592
{
35933593
void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
35943594

3595-
trace_kmem_cache_alloc_node(_RET_IP_, ret,
3595+
trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
35963596
cachep->object_size, cachep->size,
35973597
flags, nodeid);
35983598

@@ -3611,7 +3611,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
36113611
ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
36123612

36133613
ret = kasan_kmalloc(cachep, ret, size, flags);
3614-
trace_kmalloc_node(_RET_IP_, ret,
3614+
trace_kmalloc_node(_RET_IP_, ret, cachep,
36153615
size, cachep->size,
36163616
flags, nodeid);
36173617
return ret;
@@ -3694,7 +3694,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
36943694
ret = slab_alloc(cachep, NULL, flags, size, caller);
36953695

36963696
ret = kasan_kmalloc(cachep, ret, size, flags);
3697-
trace_kmalloc(caller, ret,
3697+
trace_kmalloc(caller, ret, cachep,
36983698
size, cachep->size, flags);
36993699

37003700
return ret;

mm/slab_common.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -26,13 +26,12 @@
2626
#include <linux/memcontrol.h>
2727
#include <linux/stackdepot.h>
2828

29-
#define CREATE_TRACE_POINTS
30-
#include <trace/events/kmem.h>
31-
3229
#include "internal.h"
33-
3430
#include "slab.h"
3531

32+
#define CREATE_TRACE_POINTS
33+
#include <trace/events/kmem.h>
34+
3635
enum slab_state slab_state;
3736
LIST_HEAD(slab_caches);
3837
DEFINE_MUTEX(slab_mutex);
@@ -959,7 +958,7 @@ EXPORT_SYMBOL(kmalloc_order);
959958
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
960959
{
961960
void *ret = kmalloc_order(size, flags, order);
962-
trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
961+
trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
963962
return ret;
964963
}
965964
EXPORT_SYMBOL(kmalloc_order_trace);

mm/slob.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -507,7 +507,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
507507
*m = size;
508508
ret = (void *)m + minalign;
509509

510-
trace_kmalloc_node(caller, ret,
510+
trace_kmalloc_node(caller, ret, NULL,
511511
size, size + minalign, gfp, node);
512512
} else {
513513
unsigned int order = get_order(size);
@@ -516,7 +516,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
516516
gfp |= __GFP_COMP;
517517
ret = slob_new_pages(gfp, order, node);
518518

519-
trace_kmalloc_node(caller, ret,
519+
trace_kmalloc_node(caller, ret, NULL,
520520
size, PAGE_SIZE << order, gfp, node);
521521
}
522522

@@ -616,12 +616,12 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
616616

617617
if (c->size < PAGE_SIZE) {
618618
b = slob_alloc(c->size, flags, c->align, node, 0);
619-
trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
619+
trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
620620
SLOB_UNITS(c->size) * SLOB_UNIT,
621621
flags, node);
622622
} else {
623623
b = slob_new_pages(flags, get_order(c->size), node);
624-
trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
624+
trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
625625
PAGE_SIZE << get_order(c->size),
626626
flags, node);
627627
}

mm/slub.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -3257,7 +3257,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
32573257
{
32583258
void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
32593259

3260-
trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
3260+
trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
32613261
s->size, gfpflags);
32623262

32633263
return ret;
@@ -3280,7 +3280,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_lru);
32803280
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
32813281
{
32823282
void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
3283-
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
3283+
trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
32843284
ret = kasan_kmalloc(s, ret, size, gfpflags);
32853285
return ret;
32863286
}
@@ -3292,7 +3292,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
32923292
{
32933293
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
32943294

3295-
trace_kmem_cache_alloc_node(_RET_IP_, ret,
3295+
trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
32963296
s->object_size, s->size, gfpflags, node);
32973297

32983298
return ret;
@@ -3306,7 +3306,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
33063306
{
33073307
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
33083308

3309-
trace_kmalloc_node(_RET_IP_, ret,
3309+
trace_kmalloc_node(_RET_IP_, ret, s,
33103310
size, s->size, gfpflags, node);
33113311

33123312
ret = kasan_kmalloc(s, ret, size, gfpflags);
@@ -4441,7 +4441,7 @@ void *__kmalloc(size_t size, gfp_t flags)
44414441

44424442
ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
44434443

4444-
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
4444+
trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);
44454445

44464446
ret = kasan_kmalloc(s, ret, size, flags);
44474447

@@ -4475,7 +4475,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
44754475
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
44764476
ret = kmalloc_large_node(size, flags, node);
44774477

4478-
trace_kmalloc_node(_RET_IP_, ret,
4478+
trace_kmalloc_node(_RET_IP_, ret, NULL,
44794479
size, PAGE_SIZE << get_order(size),
44804480
flags, node);
44814481

@@ -4489,7 +4489,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
44894489

44904490
ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
44914491

4492-
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
4492+
trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);
44934493

44944494
ret = kasan_kmalloc(s, ret, size, flags);
44954495

@@ -4946,7 +4946,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
49464946
ret = slab_alloc(s, NULL, gfpflags, caller, size);
49474947

49484948
/* Honor the call site pointer we received. */
4949-
trace_kmalloc(caller, ret, size, s->size, gfpflags);
4949+
trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
49504950

49514951
return ret;
49524952
}
@@ -4962,7 +4962,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
49624962
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
49634963
ret = kmalloc_large_node(size, gfpflags, node);
49644964

4965-
trace_kmalloc_node(caller, ret,
4965+
trace_kmalloc_node(caller, ret, NULL,
49664966
size, PAGE_SIZE << get_order(size),
49674967
gfpflags, node);
49684968

@@ -4977,7 +4977,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
49774977
ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
49784978

49794979
/* Honor the call site pointer we received. */
4980-
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4980+
trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
49814981

49824982
return ret;
49834983
}

0 commit comments

Comments
 (0)