Commit cd43b50

Merge tag 'slab-for-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
 "This time it's just a bunch of smaller cleanups and fixes for SLAB and
  SLUB:

   - Make it possible to use kmem_cache_alloc_bulk() early in boot when
     interrupts are not yet enabled, as code doing that started to appear
     via new maple tree users (Thomas Gleixner)

   - Fix debugfs-related memory leak in SLUB (Greg Kroah-Hartman)

   - Use the standard idiom to get head page of folio (SeongJae Park)

   - Simplify and inline is_debug_pagealloc_cache() in SLAB (lvqian)

   - Remove unused variable in SLAB (Gou Hao)"

* tag 'slab-for-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm, slab/slub: Ensure kmem_cache_alloc_bulk() is available early
  mm/slub: fix memory leak with using debugfs_lookup()
  mm/slab.c: cleanup is_debug_pagealloc_cache()
  mm/sl{a,u}b: fix wrong usages of folio_page() for getting head pages
  mm/slab: remove unused slab_early_init
2 parents 2b79eb7 + b45bc2e commit cd43b50
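
The headline item, making kmem_cache_alloc_bulk() usable before interrupts are enabled, comes down to replacing the unconditional local_irq_disable()/local_irq_enable() and local_lock_irq()/local_unlock_irq() pairs in the SLAB and SLUB bulk paths with the flag-saving variants, so a caller that already runs with IRQs off (early boot) does not get them force-enabled on return. A minimal sketch of that pattern, with a toy GFP_NOWAIT kmalloc() loop standing in for the real per-object fast path (bulk_alloc_sketch() is illustrative only, not kernel code):

/*
 * Sketch of the conversion pattern only, not the kernel's actual bulk
 * allocation code; bulk_alloc_sketch() and its kmalloc() loop are
 * illustrative stand-ins.
 */
#include <linux/irqflags.h>
#include <linux/slab.h>
#include <linux/types.h>

static size_t bulk_alloc_sketch(size_t size, void **p)
{
	unsigned long flags;
	size_t i;

	/*
	 * local_irq_save() records the current interrupt state before
	 * disabling, and local_irq_restore() puts exactly that state back.
	 * If the caller already runs with IRQs off (early boot), they stay
	 * off; a local_irq_enable() on the way out would have turned them
	 * on unconditionally.
	 */
	local_irq_save(flags);
	for (i = 0; i < size; i++) {
		p[i] = kmalloc(16, GFP_NOWAIT);
		if (!p[i])
			break;
	}
	local_irq_restore(flags);

	return i;	/* number of objects actually allocated */
}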

2 files changed, 22 insertions(+), 25 deletions(-)

mm/slab.c

Lines changed: 15 additions & 19 deletions
@@ -220,7 +220,6 @@ static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 static inline void fixup_slab_list(struct kmem_cache *cachep,
 				struct kmem_cache_node *n, struct slab *slab,
 				void **list);
-static int slab_early_init = 1;
 
 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 
@@ -1249,8 +1248,6 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL_NODE;
 	setup_kmalloc_cache_index_table();
 
-	slab_early_init = 0;
-
 	/* 5) Replace the bootstrap kmem_cache_node */
 	{
 		int nid;
@@ -1389,7 +1386,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 
 	BUG_ON(!folio_test_slab(folio));
 	__slab_clear_pfmemalloc(slab);
-	page_mapcount_reset(folio_page(folio, 0));
+	page_mapcount_reset(&folio->page);
 	folio->mapping = NULL;
 	/* Make the mapping reset visible before clearing the flag */
 	smp_wmb();
@@ -1398,7 +1395,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
 	unaccount_slab(slab, order, cachep);
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(&folio->page, order);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -1413,13 +1410,10 @@ static void kmem_rcu_free(struct rcu_head *head)
 }
 
 #if DEBUG
-static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
+static inline bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
-		(cachep->size % PAGE_SIZE) == 0)
-		return true;
-
-	return false;
+	return debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
+			((cachep->size % PAGE_SIZE) == 0);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
@@ -3479,14 +3473,15 @@ cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
-	size_t i;
 	struct obj_cgroup *objcg = NULL;
+	unsigned long irqflags;
+	size_t i;
 
 	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
 	if (!s)
 		return 0;
 
-	local_irq_disable();
+	local_irq_save(irqflags);
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?:
 			     __do_cache_alloc(s, flags, NUMA_NO_NODE);
@@ -3495,7 +3490,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			goto error;
 		p[i] = objp;
 	}
-	local_irq_enable();
+	local_irq_restore(irqflags);
 
 	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
 
@@ -3508,7 +3503,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
 	return size;
 error:
-	local_irq_enable();
+	local_irq_restore(irqflags);
 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
 	kmem_cache_free_bulk(s, i, p);
@@ -3610,8 +3605,9 @@ EXPORT_SYMBOL(kmem_cache_free);
 
 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 {
+	unsigned long flags;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	for (int i = 0; i < size; i++) {
 		void *objp = p[i];
 		struct kmem_cache *s;
@@ -3621,9 +3617,9 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
 		/* called via kfree_bulk */
 		if (!folio_test_slab(folio)) {
-			local_irq_enable();
+			local_irq_restore(flags);
 			free_large_kmalloc(folio, objp);
-			local_irq_disable();
+			local_irq_save(flags);
 			continue;
 		}
 		s = folio_slab(folio)->slab_cache;
@@ -3640,7 +3636,7 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
 		__cache_free(s, objp, _RET_IP_);
 	}
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	/* FIXME: add tracing */
 }
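
The two hunks that change folio_page(folio, 0) to &folio->page are the head-page cleanup from the pull message: &folio->page names the head page directly, whereas folio_page(folio, n) reads as indexing the n-th page of the folio even when n is 0. A minimal sketch of the idiom, with free_slab_folio() as a hypothetical helper (the real call sites are kmem_freepages() above and __free_slab() in mm/slub.c below):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper, for illustration only. */
static void free_slab_folio(struct folio *folio, unsigned int order)
{
	/* The head page of a folio is simply &folio->page. */
	__free_pages(&folio->page, order);
}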

mm/slub.c

Lines changed: 7 additions & 6 deletions
@@ -2066,7 +2066,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	unaccount_slab(slab, order, s);
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(&folio->page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -3913,6 +3913,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			  size_t size, void **p, struct obj_cgroup *objcg)
 {
 	struct kmem_cache_cpu *c;
+	unsigned long irqflags;
 	int i;
 
 	/*
@@ -3921,7 +3922,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 	 * handlers invoking normal fastpath.
 	 */
 	c = slub_get_cpu_ptr(s->cpu_slab);
-	local_lock_irq(&s->cpu_slab->lock);
+	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
 	for (i = 0; i < size; i++) {
 		void *object = kfence_alloc(s, s->object_size, flags);
@@ -3942,7 +3943,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			 */
 			c->tid = next_tid(c->tid);
 
-			local_unlock_irq(&s->cpu_slab->lock);
+			local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 
 			/*
 			 * Invoking slow path likely have side-effect
@@ -3956,7 +3957,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			c = this_cpu_ptr(s->cpu_slab);
 			maybe_wipe_obj_freeptr(s, p[i]);
 
-			local_lock_irq(&s->cpu_slab->lock);
+			local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
 			continue; /* goto for-loop */
 		}
@@ -3965,7 +3966,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 		maybe_wipe_obj_freeptr(s, p[i]);
 	}
 	c->tid = next_tid(c->tid);
-	local_unlock_irq(&s->cpu_slab->lock);
+	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 	slub_put_cpu_ptr(s->cpu_slab);
 
 	return i;
@@ -6449,7 +6450,7 @@ static void debugfs_slab_add(struct kmem_cache *s)
 
 void debugfs_slab_release(struct kmem_cache *s)
 {
-	debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
+	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
 }
 
 static int __init slab_debugfs_init(void)
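
The final hunk is the debugfs leak fix: debugfs_lookup() returns the dentry with an extra reference that the caller is expected to drop with dput(), which the old one-liner never did, while debugfs_lookup_and_remove() performs the lookup, the removal and the reference drop in one call. A before/after sketch, assuming a hypothetical my_debugfs_dir parent directory that is not part of the SLUB code:

#include <linux/debugfs.h>

/* Hypothetical parent directory, for illustration only. */
static struct dentry *my_debugfs_dir;

static void remove_entry_old(const char *name)
{
	/*
	 * Buggy shape: debugfs_lookup() takes a reference on the returned
	 * dentry that is never dropped with dput(), so the dentry leaks.
	 */
	debugfs_remove_recursive(debugfs_lookup(name, my_debugfs_dir));
}

static void remove_entry_new(const char *name)
{
	/* Lookup, removal and the reference drop in a single call. */
	debugfs_lookup_and_remove(name, my_debugfs_dir);
}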
