
Commit f545154

KAGA-KOKO authored and tehcaster committed
mm, slab/slub: Ensure kmem_cache_alloc_bulk() is available early
The memory allocators are available during early boot even in the phase where interrupts are disabled and scheduling is not yet possible.

The setup is such that GFP_KERNEL allocations work in this phase without causing might_alloc() splats, because the system state is SYSTEM_BOOTING at that point, which prevents the warnings from triggering.

Most allocation/free functions use local_irq_save()/restore() or a lock variant of that. But kmem_cache_alloc_bulk() and kmem_cache_free_bulk() use local_[lock]_irq_disable()/enable(), which leads to a lockdep warning when interrupts are enabled during the early boot phase.

This went unnoticed so far as there are no early users of these interfaces. The upcoming conversion of the interrupt descriptor store from radix_tree to maple_tree triggered this warning, as maple_tree uses the bulk interface.

Cure this by converting the kmem_cache_alloc/free() bulk variants of SLUB and SLAB to local[_lock]_irq_save()/restore().

There is obviously no reclaim possible and required at this point, so there is no need to expand this coverage further.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
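For context, the following is a minimal sketch of the kind of early-boot caller this change enables. It is a hypothetical illustration, not part of the patch: the function name and the passed-in cache are made up. The point is that with the save/restore conversion inside the bulk APIs, interrupt state is put back to whatever the caller had rather than being unconditionally re-enabled, so a bulk allocation is safe even while interrupts are still hard-disabled during boot.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Hypothetical early-boot user of the bulk API (illustration only).
 * During SYSTEM_BOOTING, interrupts may still be hard-disabled here;
 * with local_irq_save()/restore() inside kmem_cache_alloc_bulk() and
 * kmem_cache_free_bulk(), that state is preserved instead of being
 * unconditionally re-enabled.
 */
static int __init early_bulk_alloc_example(struct kmem_cache *cache)
{
	void *objs[8];
	int allocated;

	allocated = kmem_cache_alloc_bulk(cache, GFP_KERNEL,
					  ARRAY_SIZE(objs), objs);
	if (!allocated)
		return -ENOMEM;

	/* ... use the objects ... */

	kmem_cache_free_bulk(cache, allocated, objs);
	return 0;
}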
1 parent aa4a860 commit f545154

2 files changed: +15 −12 lines changed


mm/slab.c

Lines changed: 10 additions & 8 deletions
@@ -3477,14 +3477,15 @@ cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
-	size_t i;
 	struct obj_cgroup *objcg = NULL;
+	unsigned long irqflags;
+	size_t i;
 
 	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
 	if (!s)
 		return 0;
 
-	local_irq_disable();
+	local_irq_save(irqflags);
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?:
 			     __do_cache_alloc(s, flags, NUMA_NO_NODE);
@@ -3493,7 +3494,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			goto error;
 		p[i] = objp;
 	}
-	local_irq_enable();
+	local_irq_restore(irqflags);
 
 	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
 
@@ -3506,7 +3507,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
 	return size;
 error:
-	local_irq_enable();
+	local_irq_restore(irqflags);
 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
 	kmem_cache_free_bulk(s, i, p);
@@ -3608,8 +3609,9 @@ EXPORT_SYMBOL(kmem_cache_free);
 
 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 {
+	unsigned long flags;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	for (int i = 0; i < size; i++) {
 		void *objp = p[i];
 		struct kmem_cache *s;
@@ -3619,9 +3621,9 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
 		/* called via kfree_bulk */
 		if (!folio_test_slab(folio)) {
-			local_irq_enable();
+			local_irq_restore(flags);
 			free_large_kmalloc(folio, objp);
-			local_irq_disable();
+			local_irq_save(flags);
 			continue;
 		}
 		s = folio_slab(folio)->slab_cache;
@@ -3638,7 +3640,7 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
 		__cache_free(s, objp, _RET_IP_);
 	}
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	/* FIXME: add tracing */
 }

mm/slub.c

Lines changed: 5 additions & 4 deletions
@@ -3913,6 +3913,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			  size_t size, void **p, struct obj_cgroup *objcg)
 {
 	struct kmem_cache_cpu *c;
+	unsigned long irqflags;
 	int i;
 
 	/*
@@ -3921,7 +3922,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 	 * handlers invoking normal fastpath.
 	 */
 	c = slub_get_cpu_ptr(s->cpu_slab);
-	local_lock_irq(&s->cpu_slab->lock);
+	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
 	for (i = 0; i < size; i++) {
 		void *object = kfence_alloc(s, s->object_size, flags);
@@ -3942,7 +3943,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			 */
 			c->tid = next_tid(c->tid);
 
-			local_unlock_irq(&s->cpu_slab->lock);
+			local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 
 			/*
 			 * Invoking slow path likely have side-effect
@@ -3956,7 +3957,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			c = this_cpu_ptr(s->cpu_slab);
 			maybe_wipe_obj_freeptr(s, p[i]);
 
-			local_lock_irq(&s->cpu_slab->lock);
+			local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
 			continue; /* goto for-loop */
 		}
@@ -3965,7 +3966,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 		maybe_wipe_obj_freeptr(s, p[i]);
 	}
 	c->tid = next_tid(c->tid);
-	local_unlock_irq(&s->cpu_slab->lock);
+	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 	slub_put_cpu_ptr(s->cpu_slab);
 
 	return i;
