Skip to content

Commit 9e19aa1

Browse files
committed
Merge branch 'slab/for-6.13/features' into slab/for-next
Merge the slab feature branch for 6.13: - Add new slab_strict_numa parameter for per-object memory policies (Christoph Lameter)
2 parents 2420baa + f7c80fa commit 9e19aa1

File tree

3 files changed

+62
-0
lines changed

3 files changed

+62
-0
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6147,6 +6147,16 @@
61476147
For more information see Documentation/mm/slub.rst.
61486148
(slub_nomerge legacy name also accepted for now)
61496149

	slab_strict_numa	[MM]
			Support memory policies on a per object level
			in the slab allocator. The default is for memory
			policies to be applied at the folio level when
			a new folio is needed or a partial folio is
			retrieved from the lists. Increases overhead
			in the slab fastpaths but gains more accurate
			NUMA kernel object placement which helps with slow
			interconnects in NUMA systems.
61506160
slram= [HW,MTD]
61516161

61526162
smart2= [HW]

Documentation/mm/slub.rst

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -175,6 +175,15 @@ can be influenced by kernel parameters:
175175
``slab_max_order`` to 0, which causes the minimum possible order of
176176
slabs allocation.
177177

``slab_strict_numa``
	Enables the application of memory policies on each
	allocation. This results in more accurate placement of
	objects which may result in the reduction of accesses
	to remote nodes. The default is to only apply memory
	policies at the folio level when a new folio is acquired
	or a folio is retrieved from the lists. Enabling this
	option reduces the fastpath performance of the slab allocator.
178187
SLUB Debug output
179188
=================
180189

mm/slub.c

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -218,6 +218,10 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
218218
#endif
219219
#endif /* CONFIG_SLUB_DEBUG */
220220

221+
#ifdef CONFIG_NUMA
222+
static DEFINE_STATIC_KEY_FALSE(strict_numa);
223+
#endif
224+
221225
/* Structure holding parameters for get_partial() call chain */
222226
struct partial_context {
223227
gfp_t flags;
@@ -3949,6 +3953,28 @@ static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
39493953
object = c->freelist;
39503954
slab = c->slab;
39513955

3956+
#ifdef CONFIG_NUMA
3957+
if (static_branch_unlikely(&strict_numa) &&
3958+
node == NUMA_NO_NODE) {
3959+
3960+
struct mempolicy *mpol = current->mempolicy;
3961+
3962+
if (mpol) {
3963+
/*
3964+
* Special BIND rule support. If existing slab
3965+
* is in permitted set then do not redirect
3966+
* to a particular node.
3967+
* Otherwise we apply the memory policy to get
3968+
* the node we need to allocate on.
3969+
*/
3970+
if (mpol->mode != MPOL_BIND || !slab ||
3971+
!node_isset(slab_nid(slab), mpol->nodes))
3972+
3973+
node = mempolicy_slab_node();
3974+
}
3975+
}
3976+
#endif
3977+
39523978
if (!USE_LOCKLESS_FAST_PATH() ||
39533979
unlikely(!object || !slab || !node_match(slab, node))) {
39543980
object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
@@ -5715,6 +5741,23 @@ static int __init setup_slub_min_objects(char *str)
57155741
__setup("slab_min_objects=", setup_slub_min_objects);
57165742
__setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
57175743

5744+
/*
 * Parse the "slab_strict_numa" kernel boot parameter.
 *
 * On a system with more than one NUMA node this enables the
 * strict_numa static key, which makes the allocation fastpath
 * (see the CONFIG_NUMA check in __slab_alloc_node() in this commit)
 * apply the current task's memory policy per object instead of only
 * at the folio level.  On a single-node system the request is
 * ignored with a warning, since per-object NUMA placement is
 * meaningless there.
 *
 * NOTE(review): the interleaved "5744+"-style lines are diff-rendering
 * artifacts from the scraped commit page, not part of mm/slub.c.
 */
#ifdef CONFIG_NUMA
5745+
static int __init setup_slab_strict_numa(char *str)
5746+
{
5747+
/* Only meaningful when the system actually has multiple nodes. */
if (nr_node_ids > 1) {
5748+
static_branch_enable(&strict_numa);
5749+
pr_info("SLUB: Strict NUMA enabled.\n");
5750+
} else {
5751+
pr_warn("slab_strict_numa parameter set on non NUMA system.\n");
5752+
}
5753+
5754+
/* Return 1: the boot option was recognized and consumed. */
return 1;
5755+
}
5756+
5757+
/* Register the handler for the "slab_strict_numa" boot parameter. */
__setup("slab_strict_numa", setup_slab_strict_numa);
5758+
#endif
5759+
5760+
57185761
#ifdef CONFIG_HARDENED_USERCOPY
57195762
/*
57205763
* Rejects incorrectly sized objects and objects that are to be copied

0 commit comments

Comments
 (0)