@@ -47,11 +47,18 @@ struct debug_bucket {
 	raw_spinlock_t		lock;
 };
 
+struct pool_stats {
+	unsigned int		cur_used;
+	unsigned int		max_used;
+	unsigned int		min_fill;
+};
+
 struct obj_pool {
 	struct hlist_head	objects;
 	unsigned int		cnt;
 	unsigned int		min_cnt;
 	unsigned int		max_cnt;
+	struct pool_stats	stats;
 } ____cacheline_aligned;
 
 
@@ -66,8 +73,11 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 static DEFINE_RAW_SPINLOCK(pool_lock);
 
 static struct obj_pool pool_global = {
-	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
-	.max_cnt	= ODEBUG_POOL_SIZE,
+	.min_cnt		= ODEBUG_POOL_MIN_LEVEL,
+	.max_cnt		= ODEBUG_POOL_SIZE,
+	.stats			= {
+		.min_fill	= ODEBUG_POOL_SIZE,
+	},
 };
 
 static struct obj_pool pool_to_free = {
@@ -76,16 +86,6 @@ static struct obj_pool pool_to_free = {
 
 static HLIST_HEAD(pool_boot);
 
-/*
- * Because of the presence of percpu free pools, obj_pool_free will
- * under-count those in the percpu free pools. Similarly, obj_pool_used
- * will over-count those in the percpu free pools. Adjustments will be
- * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
- * can be off.
- */
-static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
-static int			obj_pool_used;
-static int __data_racy		obj_pool_max_used;
 static bool			obj_freeing;
 
 static int __data_racy		debug_objects_maxchain __read_mostly;
@@ -231,6 +231,19 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
 	return obj;
 }
 
+static void pcpu_refill_stats(void)
+{
+	struct pool_stats *stats = &pool_global.stats;
+
+	WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);
+
+	if (stats->cur_used > stats->max_used)
+		stats->max_used = stats->cur_used;
+
+	if (pool_global.cnt < stats->min_fill)
+		stats->min_fill = pool_global.cnt;
+}
+
 static struct debug_obj *pcpu_alloc(void)
 {
 	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
@@ -250,13 +263,7 @@ static struct debug_obj *pcpu_alloc(void)
 			if (!pool_move_batch(pcp, &pool_global))
 				return NULL;
 		}
-		obj_pool_used += ODEBUG_BATCH_SIZE;
-
-		if (obj_pool_used > obj_pool_max_used)
-			obj_pool_max_used = obj_pool_used;
-
-		if (pool_global.cnt < obj_pool_min_free)
-			obj_pool_min_free = pool_global.cnt;
+		pcpu_refill_stats();
 	}
 }
 
@@ -285,7 +292,7 @@ static void pcpu_free(struct debug_obj *obj)
 	/* Try to fit the batch into the pool_global first */
 	if (!pool_move_batch(&pool_global, pcp))
 		pool_move_batch(&pool_to_free, pcp);
-	obj_pool_used -= ODEBUG_BATCH_SIZE;
+	WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
 }
 
 static void free_object_list(struct hlist_head *head)
@@ -1074,23 +1081,33 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
 
 static int debug_stats_show(struct seq_file *m, void *v)
 {
-	int cpu, obj_percpu_free = 0;
+	unsigned int cpu, pool_used, pcp_free = 0;
 
+	/*
+	 * pool_global.stats.cur_used is the number of objects currently
+	 * handed out to per CPU pools, in multiples of ODEBUG_BATCH_SIZE.
+	 * Subtract the number of free objects in the per CPU pools.
+	 * As this is lockless the number is an estimate.
+	 */
 	for_each_possible_cpu(cpu)
-		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);
-
-	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
-	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
-	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
-	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
-	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
-	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
-	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
-	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
-	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
-	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
-	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
-	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
+		pcp_free += per_cpu(pool_pcpu.cnt, cpu);
+
+	pool_used = data_race(pool_global.stats.cur_used);
+	pcp_free = min(pool_used, pcp_free);
+	pool_used -= pcp_free;
+
+	seq_printf(m, "max_chain     : %d\n", debug_objects_maxchain);
+	seq_printf(m, "max_checked   : %d\n", debug_objects_maxchecked);
+	seq_printf(m, "warnings      : %d\n", debug_objects_warnings);
+	seq_printf(m, "fixups        : %d\n", debug_objects_fixups);
+	seq_printf(m, "pool_free     : %u\n", pool_count(&pool_global) + pcp_free);
+	seq_printf(m, "pool_pcp_free : %u\n", pcp_free);
+	seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
+	seq_printf(m, "pool_used     : %u\n", pool_used);
+	seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
+	seq_printf(m, "on_free_list  : %u\n", pool_count(&pool_to_free));
+	seq_printf(m, "objs_allocated: %d\n", debug_objects_allocated);
+	seq_printf(m, "objs_freed    : %d\n", debug_objects_freed);
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(debug_stats);
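
For illustration, a read of the debugobjects stats file (exposed through debugfs, typically /sys/kernel/debug/debug_objects) after this change produces output in the shape of the seq_printf() calls above. The numbers below are invented for the example: cur_used is sampled racily at 1024, the summed per-CPU free count is 128, so pool_used reports 1024 - 128 = 896, and pool_free adds the same 128 back onto the global pool count:

max_chain     : 5
max_checked   : 14
warnings      : 0
fixups        : 0
pool_free     : 1152
pool_pcp_free : 128
pool_min_free : 341
pool_used     : 896
pool_max_used : 1280
on_free_list  : 0
objs_allocated: 2304
objs_freed    : 0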