@@ -164,6 +164,22 @@ static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
 	return true;
 }
 
+static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
+{
+	struct hlist_node *last;
+	struct debug_obj *obj;
+
+	if (dst->cnt >= dst->max_cnt)
+		return false;
+
+	obj = hlist_entry(head->first, typeof(*obj), node);
+	last = obj->batch_last;
+
+	hlist_splice_init(head, last, &dst->objects);
+	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
+	return true;
+}
+
 static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
 {
 	if (!src->cnt)
@@ -288,6 +304,28 @@ static void fill_pool_from_freelist(void)
 	clear_bit(0, &state);
 }
 
+static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
+{
+	struct hlist_node *last = NULL;
+	struct debug_obj *obj;
+
+	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+		obj = kmem_cache_zalloc(cache, gfp);
+		if (!obj) {
+			free_object_list(head);
+			return false;
+		}
+		debug_objects_allocated++;
+
+		if (!last)
+			last = &obj->node;
+		obj->batch_last = last;
+
+		hlist_add_head(&obj->node, head);
+	}
+	return true;
+}
+
 static void fill_pool(void)
 {
 	static atomic_t cpus_allocating;
@@ -302,25 +340,14 @@ static void fill_pool(void)
 
 	atomic_inc(&cpus_allocating);
 	while (pool_should_refill(&pool_global)) {
-		struct debug_obj *new, *last = NULL;
 		HLIST_HEAD(head);
-		int cnt;
 
-		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
-			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
-			if (!new)
-				break;
-			hlist_add_head(&new->node, &head);
-			if (!last)
-				last = new;
-		}
-		if (!cnt)
+		if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
 			break;
 
 		guard(raw_spinlock_irqsave)(&pool_lock);
-		hlist_splice_init(&head, &last->node, &pool_global.objects);
-		debug_objects_allocated += cnt;
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
+		if (!pool_push_batch(&pool_global, &head))
+			pool_push_batch(&pool_to_free, &head);
 	}
 	atomic_dec(&cpus_allocating);
 }
@@ -1302,36 +1329,27 @@ void __init debug_objects_early_init(void)
 static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
 {
 	struct debug_bucket *db = obj_hash;
-	struct debug_obj *obj, *new;
 	struct hlist_node *tmp;
+	struct debug_obj *obj;
 	HLIST_HEAD(objects);
 	int i;
 
-	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
-		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
-		if (!obj)
+	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
+		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
 			goto free;
-		hlist_add_head(&obj->node, &objects);
+		pool_push_batch(&pool_global, &objects);
 	}
 
-	debug_objects_allocated = ODEBUG_POOL_SIZE;
-	pool_global.cnt = ODEBUG_POOL_SIZE;
-
-	/*
-	 * Move the allocated objects to the global pool and disconnect the
-	 * boot pool.
-	 */
-	hlist_move_list(&objects, &pool_global.objects);
+	/* Disconnect the boot pool. */
 	pool_boot.first = NULL;
 
 	/* Replace the active object references */
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 		hlist_move_list(&db->list, &objects);
 
 		hlist_for_each_entry(obj, &objects, node) {
-			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
-			hlist_del(&new->node);
-			pool_global.cnt--;
+			struct debug_obj *new = pcpu_alloc();
+
 			/* copy object data */
 			*new = *obj;
 			hlist_add_head(&new->node, &db->list);
@@ -1340,7 +1358,7 @@ static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
 	return true;
 free:
 	/* Can't use free_object_list() as the cache is not populated yet */
-	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
+	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
 		hlist_del(&obj->node);
 		kmem_cache_free(cache, obj);
 	}
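
For reference, a minimal sketch (not part of this diff) of how the two new helpers compose, modeled on the fill_pool() hunk above; pool_global, pool_to_free, pool_lock and obj_cache are the existing globals in lib/debugobjects.c, and the GFP flags are only illustrative:

	/*
	 * Sketch: fill one batch of ODEBUG_BATCH_SIZE objects outside the pool
	 * lock, then splice the whole chain into a pool in a single O(1) step.
	 * kmem_alloc_batch() stores batch_last in every object, so
	 * pool_push_batch() can pass the chain end to hlist_splice_init()
	 * without walking the list. If the global pool is already full, the
	 * batch is routed to the to-free pool instead.
	 */
	HLIST_HEAD(head);

	if (kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN)) {
		guard(raw_spinlock_irqsave)(&pool_lock);
		if (!pool_push_batch(&pool_global, &head))
			pool_push_batch(&pool_to_free, &head);
	}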