
Commit aebbfe0

debugobjects: Prepare kmem_cache allocations for batching
Allocate a batch and then push it into the pool. Utilize the
debug_obj::batch_last pointer for keeping track of the batch boundary.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20241007164914.198647184@linutronix.de
1 parent 74fe1ad commit aebbfe0
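
The trick that makes batch moves O(1) is visible in the diff below: the first object of every batch carries a pointer to the batch's last node (debug_obj::batch_last), so pool_push_batch() can splice the entire chain into a pool with hlist_splice_init() instead of walking it. The following is a minimal userspace C sketch of that idea; struct node, struct list, alloc_batch() and push_batch() are illustrative stand-ins, not the kernel's hlist API.

#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 4

struct node {
        struct node *next;
        struct node *batch_last;  /* first node of a batch points at its last node */
        int id;
};

struct list {
        struct node *first;
};

/* Build one batch; nodes are pushed at the head, so the first
 * allocation becomes the tail and is recorded as the boundary. */
static int alloc_batch(struct list *batch)
{
        struct node *last = NULL;

        for (int i = 0; i < BATCH_SIZE; i++) {
                struct node *n = calloc(1, sizeof(*n));

                if (!n)
                        return 0;  /* the kernel code frees the partial batch here */
                if (!last)
                        last = n;
                n->batch_last = last;
                n->id = i;
                n->next = batch->first;
                batch->first = n;
        }
        return 1;
}

/* O(1) splice: hook the whole batch in front of dst using the
 * boundary stored in the first node, like hlist_splice_init(). */
static void push_batch(struct list *dst, struct list *batch)
{
        batch->first->batch_last->next = dst->first;
        dst->first = batch->first;
        batch->first = NULL;
}

int main(void)
{
        struct list pool = { NULL }, batch = { NULL };

        if (alloc_batch(&batch))
                push_batch(&pool, &batch);

        for (struct node *n = pool.first; n; n = n->next)
                printf("obj %d\n", n->id);

        while (pool.first) {
                struct node *n = pool.first;

                pool.first = n->next;
                free(n);
        }
        return 0;
}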

File tree: 1 file changed (+49, -31 lines)

lib/debugobjects.c

@@ -164,6 +164,22 @@ static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
 	return true;
 }
 
+static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
+{
+	struct hlist_node *last;
+	struct debug_obj *obj;
+
+	if (dst->cnt >= dst->max_cnt)
+		return false;
+
+	obj = hlist_entry(head->first, typeof(*obj), node);
+	last = obj->batch_last;
+
+	hlist_splice_init(head, last, &dst->objects);
+	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
+	return true;
+}
+
 static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
 {
 	if (!src->cnt)
@@ -288,6 +304,28 @@ static void fill_pool_from_freelist(void)
 	clear_bit(0, &state);
 }
 
+static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
+{
+	struct hlist_node *last = NULL;
+	struct debug_obj *obj;
+
+	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+		obj = kmem_cache_zalloc(cache, gfp);
+		if (!obj) {
+			free_object_list(head);
+			return false;
+		}
+		debug_objects_allocated++;
+
+		if (!last)
+			last = &obj->node;
+		obj->batch_last = last;
+
+		hlist_add_head(&obj->node, head);
+	}
+	return true;
+}
+
 static void fill_pool(void)
 {
 	static atomic_t cpus_allocating;
@@ -302,25 +340,14 @@ static void fill_pool(void)
 
 	atomic_inc(&cpus_allocating);
 	while (pool_should_refill(&pool_global)) {
-		struct debug_obj *new, *last = NULL;
 		HLIST_HEAD(head);
-		int cnt;
 
-		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
-			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
-			if (!new)
-				break;
-			hlist_add_head(&new->node, &head);
-			if (!last)
-				last = new;
-		}
-		if (!cnt)
+		if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
 			break;
 
 		guard(raw_spinlock_irqsave)(&pool_lock);
-		hlist_splice_init(&head, &last->node, &pool_global.objects);
-		debug_objects_allocated += cnt;
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
+		if (!pool_push_batch(&pool_global, &head))
+			pool_push_batch(&pool_to_free, &head);
 	}
 	atomic_dec(&cpus_allocating);
 }
@@ -1302,36 +1329,27 @@ void __init debug_objects_early_init(void)
 static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
 {
 	struct debug_bucket *db = obj_hash;
-	struct debug_obj *obj, *new;
 	struct hlist_node *tmp;
+	struct debug_obj *obj;
 	HLIST_HEAD(objects);
 	int i;
 
-	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
-		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
-		if (!obj)
+	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
+		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
 			goto free;
-		hlist_add_head(&obj->node, &objects);
+		pool_push_batch(&pool_global, &objects);
 	}
 
-	debug_objects_allocated = ODEBUG_POOL_SIZE;
-	pool_global.cnt = ODEBUG_POOL_SIZE;
-
-	/*
-	 * Move the allocated objects to the global pool and disconnect the
-	 * boot pool.
-	 */
-	hlist_move_list(&objects, &pool_global.objects);
+	/* Disconnect the boot pool. */
 	pool_boot.first = NULL;
 
 	/* Replace the active object references */
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 		hlist_move_list(&db->list, &objects);
 
 		hlist_for_each_entry(obj, &objects, node) {
-			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
-			hlist_del(&new->node);
-			pool_global.cnt--;
+			struct debug_obj *new = pcpu_alloc();
+
 			/* copy object data */
 			*new = *obj;
 			hlist_add_head(&new->node, &db->list);
@@ -1340,7 +1358,7 @@ static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache
 	return true;
 free:
 	/* Can't use free_object_list() as the cache is not populated yet */
-	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
+	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
 		hlist_del(&obj->node);
 		kmem_cache_free(cache, obj);
 	}
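
One detail worth calling out from the hunks above: kmem_alloc_batch() is all-or-nothing. On a failed allocation it releases the partial chain through free_object_list() and returns false, so fill_pool() and debug_objects_replace_static_objects() never see a half-filled batch. A hedged userspace sketch of that rollback pattern follows; the names (struct obj, free_chain(), alloc_batch()) are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 16

struct obj {
        struct obj *next;
};

/* Free a whole chain of objects. */
static void free_chain(struct obj *head)
{
        while (head) {
                struct obj *next = head->next;

                free(head);
                head = next;
        }
}

/* All-or-nothing: either BATCH_SIZE objects end up on *head,
 * or the partial batch is rolled back and *head stays empty. */
static bool alloc_batch(struct obj **head)
{
        for (int i = 0; i < BATCH_SIZE; i++) {
                struct obj *o = calloc(1, sizeof(*o));

                if (!o) {
                        free_chain(*head);
                        *head = NULL;
                        return false;
                }
                o->next = *head;
                *head = o;
        }
        return true;
}

int main(void)
{
        struct obj *batch = NULL;

        printf("batch %s\n", alloc_batch(&batch) ? "filled" : "failed");
        free_chain(batch);
        return 0;
}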
