
Commit 0dd1cab

Merge tag 'slab-for-5.20_or_6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - An addition of 'accounted' flag to slab allocation tracepoints to
   indicate memcg_kmem accounting, by Vasily

 - An optimization of memcg handling in freeing paths, by Muchun

 - Various smaller fixes and cleanups

* tag 'slab-for-5.20_or_6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab_common: move generic bulk alloc/free functions to SLOB
  mm/sl[au]b: use own bulk free function when bulk alloc failed
  mm: slab: optimize memcg_slab_free_hook()
  mm/tracing: add 'accounted' entry into output of allocation tracepoints
  tools/vm/slabinfo: Handle files in debugfs
  mm/slub: Simplify __kmem_cache_alias()
  mm, slab: fix bad alignments
2 parents 0cec3f2 + 3041808 commit 0dd1cab

File tree

7 files changed: +134 -158 lines changed

include/trace/events/kmem.h

Lines changed: 26 additions & 14 deletions
@@ -13,18 +13,20 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 
 	TP_PROTO(unsigned long call_site,
 		 const void *ptr,
+		 struct kmem_cache *s,
 		 size_t bytes_req,
 		 size_t bytes_alloc,
 		 gfp_t gfp_flags),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	call_site	)
 		__field(	const void *,	ptr		)
 		__field(	size_t,		bytes_req	)
 		__field(	size_t,		bytes_alloc	)
 		__field(	unsigned long,	gfp_flags	)
+		__field(	bool,		accounted	)
 	),
 
 	TP_fast_assign(
@@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__entry->bytes_req	= bytes_req;
 		__entry->bytes_alloc	= bytes_alloc;
 		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
+		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+					  ((gfp_flags & __GFP_ACCOUNT) ||
+					  (s && s->flags & SLAB_ACCOUNT)) : false;
 	),
 
-	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
 		(void *)__entry->call_site,
 		__entry->ptr,
 		__entry->bytes_req,
 		__entry->bytes_alloc,
-		show_gfp_flags(__entry->gfp_flags))
+		show_gfp_flags(__entry->gfp_flags),
+		__entry->accounted ? "true" : "false")
 );
 
 DEFINE_EVENT(kmem_alloc, kmalloc,
 
-	TP_PROTO(unsigned long call_site, const void *ptr,
+	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
 		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
 );
 
 DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
 
-	TP_PROTO(unsigned long call_site, const void *ptr,
+	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
 		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
 );
 
 DECLARE_EVENT_CLASS(kmem_alloc_node,
 
 	TP_PROTO(unsigned long call_site,
 		 const void *ptr,
+		 struct kmem_cache *s,
 		 size_t bytes_req,
 		 size_t bytes_alloc,
 		 gfp_t gfp_flags,
 		 int node),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	call_site	)
@@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__field(	size_t,		bytes_alloc	)
 		__field(	unsigned long,	gfp_flags	)
 		__field(	int,		node		)
+		__field(	bool,		accounted	)
 	),
 
 	TP_fast_assign(
@@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->bytes_alloc	= bytes_alloc;
 		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
 		__entry->node		= node;
+		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+					  ((gfp_flags & __GFP_ACCOUNT) ||
+					  (s && s->flags & SLAB_ACCOUNT)) : false;
 	),
 
-	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
 		(void *)__entry->call_site,
 		__entry->ptr,
 		__entry->bytes_req,
 		__entry->bytes_alloc,
 		show_gfp_flags(__entry->gfp_flags),
-		__entry->node)
+		__entry->node,
+		__entry->accounted ? "true" : "false")
 );
 
 DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
-		 size_t bytes_req, size_t bytes_alloc,
+		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
 		 gfp_t gfp_flags, int node),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);
 
 DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
-		 size_t bytes_req, size_t bytes_alloc,
+		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
 		 gfp_t gfp_flags, int node),
 
-	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
 );
 
 TRACE_EVENT(kfree,
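
Note (not part of the diff): the accounted field above is true only when CONFIG_MEMCG_KMEM is enabled and the allocation is charged to a memcg, either because the call site passes __GFP_ACCOUNT or because the cache was created with SLAB_ACCOUNT. A minimal, hypothetical sketch of an allocation that the updated kmem_cache_alloc event would report as accounted=true (the cache name and object size are made up; the event itself is typically enabled through tracefs under events/kmem/):

#include <linux/slab.h>

/* Illustration only: the cache name and size are hypothetical. */
static void example_accounted_alloc(void)
{
	struct kmem_cache *cache;
	void *obj;

	/* SLAB_ACCOUNT makes every object from this cache memcg-accounted. */
	cache = kmem_cache_create("example_cache", 64, 0, SLAB_ACCOUNT, NULL);
	if (!cache)
		return;

	/* With CONFIG_MEMCG_KMEM=y the kmem_cache_alloc tracepoint reports
	 * accounted=true for this allocation (s->flags & SLAB_ACCOUNT);
	 * passing __GFP_ACCOUNT to an unaccounted cache has the same effect. */
	obj = kmem_cache_alloc(cache, GFP_KERNEL);
	if (obj)
		kmem_cache_free(cache, obj);

	kmem_cache_destroy(cache);
}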

mm/slab.c

Lines changed: 10 additions & 10 deletions
@@ -3230,7 +3230,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	}
 	/* ___cache_alloc_node can fall back to other nodes */
 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
-  out:
+out:
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	init = slab_want_init_on_alloc(flags, cachep);
@@ -3259,7 +3259,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 	if (!objp)
 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
 
-  out:
+out:
 	return objp;
 }
 #else
@@ -3406,9 +3406,10 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 {
 	bool init;
 
+	memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
+
 	if (is_kfence_address(objp)) {
 		kmemleak_free_recursive(objp, cachep->flags);
-		memcg_slab_free_hook(cachep, &objp, 1);
 		__kfence_free(objp);
 		return;
 	}
@@ -3441,7 +3442,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
-	memcg_slab_free_hook(cachep, &objp, 1);
 
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
@@ -3478,7 +3478,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 {
 	void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret,
+	trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
 			       cachep->object_size, cachep->size, flags);
 
 	return ret;
@@ -3553,7 +3553,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	local_irq_enable();
 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false);
-	__kmem_cache_free_bulk(s, i, p);
+	kmem_cache_free_bulk(s, i, p);
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
@@ -3567,7 +3567,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 	ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc(_RET_IP_, ret,
+	trace_kmalloc(_RET_IP_, ret, cachep,
 		      size, cachep->size, flags);
 	return ret;
 }
@@ -3592,7 +3592,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
 
-	trace_kmem_cache_alloc_node(_RET_IP_, ret,
+	trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
 
@@ -3611,7 +3611,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc_node(_RET_IP_, ret,
+	trace_kmalloc_node(_RET_IP_, ret, cachep,
 			   size, cachep->size,
 			   flags, nodeid);
 	return ret;
@@ -3694,7 +3694,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	ret = slab_alloc(cachep, NULL, flags, size, caller);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc(caller, ret,
+	trace_kmalloc(caller, ret, cachep,
 		      size, cachep->size, flags);
 
 	return ret;
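
Note (an illustration, not part of the commit): the error path above now hands the partially allocated objects to the allocator's own kmem_cache_free_bulk() instead of the generic __kmem_cache_free_bulk() that this series removes, but the caller-visible contract is unchanged: kmem_cache_alloc_bulk() returns the number of objects allocated, or 0 after cleaning up everything it had grabbed. A hedged sketch of a caller relying on that contract ('cache' is hypothetical):

#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical caller of the bulk API. */
static int example_bulk_user(struct kmem_cache *cache)
{
	void *objs[16];

	/* Returns ARRAY_SIZE(objs) on success or 0 on failure; on failure the
	 * partially allocated objects were already freed internally, so the
	 * caller has nothing to clean up. */
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;

	/* ... use the objects ... */

	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
	return 0;
}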

mm/slab.h

Lines changed: 8 additions & 31 deletions
@@ -380,15 +380,6 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
-/*
- * Generic implementation of bulk operations
- * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the object listed
- * may be allocated or freed using these operations.
- */
-void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
-
 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 {
 	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -547,36 +538,22 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 	obj_cgroup_put(objcg);
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 					void **p, int objects)
 {
-	struct kmem_cache *s;
 	struct obj_cgroup **objcgs;
-	struct obj_cgroup *objcg;
-	struct slab *slab;
-	unsigned int off;
 	int i;
 
 	if (!memcg_kmem_enabled())
 		return;
 
-	for (i = 0; i < objects; i++) {
-		if (unlikely(!p[i]))
-			continue;
-
-		slab = virt_to_slab(p[i]);
-		/* we could be given a kmalloc_large() object, skip those */
-		if (!slab)
-			continue;
-
-		objcgs = slab_objcgs(slab);
-		if (!objcgs)
-			continue;
+	objcgs = slab_objcgs(slab);
+	if (!objcgs)
+		return;
 
-		if (!s_orig)
-			s = slab->slab_cache;
-		else
-			s = s_orig;
+	for (i = 0; i < objects; i++) {
+		struct obj_cgroup *objcg;
+		unsigned int off;
 
 		off = obj_to_index(s, slab, p[i]);
 		objcg = objcgs[off];
@@ -628,7 +605,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 {
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 					void **p, int objects)
 {
 }
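
Note (sketch only, mirroring the mm/slab.c hunk above): memcg_slab_free_hook() now takes the struct slab from its caller, so the per-object virt_to_slab() lookup, the kmalloc_large() skip and the s_orig fallback all drop out of the loop. A hypothetical single-object free path would call it like this:

/* 's' and 'obj' are placeholders for an allocator's free path. */
static void example_free_one(struct kmem_cache *s, void *obj)
{
	/* The caller resolves the slab once; the hook then only walks the
	 * objcgs[] array for the objects it is given. */
	memcg_slab_free_hook(s, virt_to_slab(obj), &obj, 1);

	/* ... continue with the allocator-specific free ... */
}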

mm/slab_common.c

Lines changed: 4 additions & 32 deletions
@@ -26,13 +26,12 @@
 #include <linux/memcontrol.h>
 #include <linux/stackdepot.h>
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/kmem.h>
-
 #include "internal.h"
-
 #include "slab.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/kmem.h>
+
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
@@ -105,33 +104,6 @@ static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
 }
 #endif
 
-void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
-{
-	size_t i;
-
-	for (i = 0; i < nr; i++) {
-		if (s)
-			kmem_cache_free(s, p[i]);
-		else
-			kfree(p[i]);
-	}
-}
-
-int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
-			    void **p)
-{
-	size_t i;
-
-	for (i = 0; i < nr; i++) {
-		void *x = p[i] = kmem_cache_alloc(s, flags);
-		if (!x) {
-			__kmem_cache_free_bulk(s, i, p);
-			return 0;
-		}
-	}
-	return i;
-}
-
 /*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
@@ -959,7 +931,7 @@ EXPORT_SYMBOL(kmalloc_order);
 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
 	void *ret = kmalloc_order(size, flags, order);
-	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order_trace);
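
Note (interpretation, not part of the diff): kmalloc_order_trace() covers allocations that bypass the kmalloc caches and come straight from the page allocator, so there is no kmem_cache to report and the tracepoint receives NULL; its accounted value can then only come from the caller's gfp flags. A hedged illustration (with SLUB, for example, sizes above KMALLOC_MAX_CACHE_SIZE take this path):

#include <linux/slab.h>

/* Illustration only. */
static void example_large_kmalloc(void)
{
	/* No kmem_cache backs this allocation, so accounted=true in the
	 * kmalloc trace event here comes solely from __GFP_ACCOUNT. */
	void *buf = kmalloc(KMALLOC_MAX_CACHE_SIZE + 1, GFP_KERNEL | __GFP_ACCOUNT);

	kfree(buf);
}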
