@@ -33,7 +33,9 @@ static UTIL_ONCE_FLAG mem_pool_ctl_initialized = UTIL_ONCE_FLAG_INIT;
char CTL_DEFAULT_ENTRIES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};
char CTL_DEFAULT_VALUES[UMF_DEFAULT_SIZE][UMF_DEFAULT_LEN] = {0};

- void ctl_init(void) { utils_mutex_init(&ctl_mtx); }
+ static struct ctl umf_pool_ctl_root;
+
+ static void ctl_init(void);

static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
                                               umf_ctl_query_source_t source,
@@ -43,9 +45,15 @@ static int CTL_SUBTREE_HANDLER(by_handle_pool)(void *ctx,
                                               umf_ctl_query_type_t queryType) {
    (void)indexes, (void)source;
    umf_memory_pool_handle_t hPool = (umf_memory_pool_handle_t)ctx;
+     int ret = ctl_query(&umf_pool_ctl_root, hPool, source, extra_name,
+                         queryType, arg, size);
+     if (ret == -1 &&
+         errno == EINVAL) { // node was not found in pool_ctl_root, try to
+                            // query the specific pool directly
+         hPool->ops.ext_ctl(hPool->pool_priv, source, extra_name, arg, size,
+                            queryType);
+     }

-     hPool->ops.ext_ctl(hPool->pool_priv, /*unused*/ 0, extra_name, arg, size,
-                        queryType);
    return 0;
}

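The handler above now does a two-step lookup: the shared pool CTL tree (`umf_pool_ctl_root`) is consulted first, and only when `ctl_query` reports "node not found" (-1 with `errno == EINVAL`) does the query fall through to the pool's own `ext_ctl`. A minimal self-contained sketch of that pattern, with hypothetical `common_query()`/`specific_query()` standing in for `ctl_query()` and `ops.ext_ctl()`:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int common_query(const char *name, size_t *out) {
    /* Only "alloc_count" lives in the common tree in this sketch. */
    if (strcmp(name, "alloc_count") != 0) {
        errno = EINVAL; /* unknown node: signal "not found" */
        return -1;
    }
    *out = 42;
    return 0;
}

static int specific_query(const char *name, size_t *out) {
    /* Pool-specific fallback; accepts anything for the sketch. */
    (void)name;
    *out = 7;
    return 0;
}

static int query(const char *name, size_t *out) {
    int ret = common_query(name, out);
    if (ret == -1 && errno == EINVAL) {
        /* Node absent from the common tree: delegate to the pool. */
        return specific_query(name, out);
    }
    return ret;
}

int main(void) {
    size_t v;
    query("alloc_count", &v); /* served by the common tree */
    printf("common: %zu\n", v);
    query("used_memory", &v); /* falls through to the pool */
    printf("specific: %zu\n", v);
    return 0;
}
```

One consequence visible in the hunk: the fallback's return value is discarded and the handler returns 0 on both paths, so a caller cannot tell a pool-side failure from success.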
@@ -96,9 +104,38 @@ static int CTL_SUBTREE_HANDLER(default)(void *ctx,
    return 0;
}

+ static int CTL_READ_HANDLER(alloc_count)(void *ctx,
+                                          umf_ctl_query_source_t source,
+                                          void *arg, size_t size,
+                                          umf_ctl_index_utlist_t *indexes,
+                                          const char *extra_name,
+                                          umf_ctl_query_type_t query_type) {
+     /* suppress unused-parameter warnings */
+     (void)source, (void)size, (void)indexes, (void)extra_name, (void)query_type;
+
+     size_t *arg_out = arg;
+     if (ctx == NULL || arg_out == NULL) {
+         return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+     }
+
+     assert(size == sizeof(size_t));
+
+     umf_memory_pool_handle_t pool = (umf_memory_pool_handle_t)ctx;
+     utils_atomic_load_acquire_size_t(&pool->stats.alloc_count, arg_out);
+     return UMF_RESULT_SUCCESS;
+ }
+
+ static const umf_ctl_node_t CTL_NODE(stats)[] = {CTL_LEAF_RO(alloc_count),
+                                                  CTL_NODE_END};
+
umf_ctl_node_t CTL_NODE(pool)[] = {CTL_LEAF_SUBTREE2(by_handle, by_handle_pool),
                                   CTL_LEAF_SUBTREE(default), CTL_NODE_END};

+ static void ctl_init(void) {
+     utils_mutex_init(&ctl_mtx);
+     CTL_REGISTER_MODULE(&umf_pool_ctl_root, stats);
+ }
+

static umf_result_t umfDefaultCtlPoolHandle(void *hPool, int operationType,
                                            const char *name, void *arg,
                                            size_t size,
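The new `stats.alloc_count` leaf is read with `utils_atomic_load_acquire_size_t`, while the allocation wrappers further down update it with atomic increments and decrements. Assuming those helpers wrap C11 atomics in the usual way (an assumption; the exact memory orderings live in the utils layer), the discipline looks roughly like:

```c
#include <stdatomic.h>
#include <stddef.h>

typedef struct {
    _Atomic size_t alloc_count; /* mirrors pool->stats.alloc_count */
} pool_stats_t;

/* writer side: one successful allocation */
static void stats_increment(pool_stats_t *s) {
    atomic_fetch_add_explicit(&s->alloc_count, 1, memory_order_acq_rel);
}

/* reader side: the CTL read handler's load-acquire */
static void stats_load_acquire(const pool_stats_t *s, size_t *out) {
    *out = atomic_load_explicit(&s->alloc_count, memory_order_acquire);
}
```

Note that the handler validates `arg` before `assert(size == sizeof(size_t))`, so release builds (NDEBUG) silently accept a mismatched size; the `(void)size` cast exists for exactly that configuration.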
@@ -160,6 +197,7 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
    pool->flags = flags;
    pool->ops = *ops;
    pool->tag = NULL;
+     memset(&pool->stats, 0, sizeof(pool->stats));

    if (NULL == pool->ops.ext_ctl) {
        pool->ops.ext_ctl = umfDefaultCtlPoolHandle;
@@ -285,23 +323,47 @@ umf_result_t umfPoolCreate(const umf_memory_pool_ops_t *ops,

void *umfPoolMalloc(umf_memory_pool_handle_t hPool, size_t size) {
    UMF_CHECK((hPool != NULL), NULL);
-     return hPool->ops.malloc(hPool->pool_priv, size);
+     void *ret = hPool->ops.malloc(hPool->pool_priv, size);
+     if (!ret) {
+         return NULL;
+     }
+
+     utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+     return ret;
}

void *umfPoolAlignedMalloc(umf_memory_pool_handle_t hPool, size_t size,
                           size_t alignment) {
    UMF_CHECK((hPool != NULL), NULL);
-     return hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
+     void *ret = hPool->ops.aligned_malloc(hPool->pool_priv, size, alignment);
+     if (!ret) {
+         return NULL;
+     }
+
+     utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+     return ret;
}

void *umfPoolCalloc(umf_memory_pool_handle_t hPool, size_t num, size_t size) {
    UMF_CHECK((hPool != NULL), NULL);
-     return hPool->ops.calloc(hPool->pool_priv, num, size);
+     void *ret = hPool->ops.calloc(hPool->pool_priv, num, size);
+     if (!ret) {
+         return NULL;
+     }
+
+     utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+     return ret;
}

void *umfPoolRealloc(umf_memory_pool_handle_t hPool, void *ptr, size_t size) {
    UMF_CHECK((hPool != NULL), NULL);
-     return hPool->ops.realloc(hPool->pool_priv, ptr, size);
+     void *ret = hPool->ops.realloc(hPool->pool_priv, ptr, size);
+     if (size == 0 && ret == NULL && ptr != NULL) { // this is free(ptr)
+         utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
+     } else if (ptr == NULL && ret != NULL) { // this is malloc(size)
+         utils_atomic_increment_size_t(&hPool->stats.alloc_count);
+     }
+     return ret;
}

size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,
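The realloc wrapper encodes C's two degenerate cases: `realloc(NULL, size)` acts as `malloc`, and a NULL result for `realloc(ptr, 0)` acts as `free`, while an ordinary resize (successful or failed) must leave the counter alone. The branch logic distilled into a pure helper (`realloc_count_delta` is illustrative, not part of the patch):

```c
#include <stddef.h>

/* Counter adjustment for a realloc(ptr, size) call that returned ret. */
static int realloc_count_delta(void *ptr, size_t size, void *ret) {
    if (size == 0 && ret == NULL && ptr != NULL) {
        return -1; /* behaved like free(ptr): one allocation gone */
    }
    if (ptr == NULL && ret != NULL) {
        return +1; /* behaved like malloc(size): one allocation gained */
    }
    return 0; /* plain resize, or a failure: count unchanged */
}
```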
@@ -312,7 +374,15 @@ size_t umfPoolMallocUsableSize(umf_memory_pool_handle_t hPool,

umf_result_t umfPoolFree(umf_memory_pool_handle_t hPool, void *ptr) {
    UMF_CHECK((hPool != NULL), UMF_RESULT_ERROR_INVALID_ARGUMENT);
-     return hPool->ops.free(hPool->pool_priv, ptr);
+     umf_result_t ret = hPool->ops.free(hPool->pool_priv, ptr);
+
+     if (ret != UMF_RESULT_SUCCESS) {
+         return ret;
+     }
+     if (ptr != NULL) {
+         utils_atomic_decrement_size_t(&hPool->stats.alloc_count);
+     }
+     return ret;
}

umf_result_t umfPoolGetLastAllocationError(umf_memory_pool_handle_t hPool) {
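Taken together, the wrappers keep `alloc_count` equal to the number of live allocations in the pool: increments happen only for non-NULL results, and `umfPoolFree` decrements only after the underlying free succeeds and only for a non-NULL `ptr`. A usage fragment (assuming `pool` is a valid handle and the calls succeed):

```c
void *p = umfPoolMalloc(pool, 64);    /* alloc_count: 0 -> 1 */
p = umfPoolRealloc(pool, p, 128);     /* resize: count stays 1 */
void *q = umfPoolCalloc(pool, 4, 32); /* alloc_count: 1 -> 2 */
umfPoolFree(pool, q);                 /* alloc_count: 2 -> 1 */
umfPoolRealloc(pool, p, 0);           /* like free(p): 1 -> 0, if it returns NULL */
```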