 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
-                                               bool *from_pool);
+static slab_list_item_t *
+bucket_get_avail_slab(disjoint_pool_t *pool, bucket_t *bucket, bool *from_pool);
 
 static __TLS umf_result_t TLS_last_allocation_error;
 
@@ -69,7 +69,7 @@ static size_t bucket_slab_alloc_size(bucket_t *bucket) {
     return utils_max(bucket->size, bucket_slab_min_size(bucket));
 }
 
-static slab_t *create_slab(bucket_t *bucket) {
+static slab_t *create_slab(bucket_t *bucket, void *mem_ptr) {
     assert(bucket);
 
     umf_result_t res = UMF_RESULT_SUCCESS;
@@ -110,13 +110,17 @@ static slab_t *create_slab(bucket_t *bucket) {
     // padding at the end of the slab
     slab->slab_size = bucket_slab_alloc_size(bucket);
 
-    // TODO not true
-    // NOTE: originally slabs memory were allocated without alignment
-    // with this registering a slab is simpler and doesn't require multimap
-    res = umfMemoryProviderAlloc(provider, slab->slab_size, 0, &slab->mem_ptr);
-    if (res != UMF_RESULT_SUCCESS) {
-        LOG_ERR("allocation of slab data failed!");
-        goto free_slab;
+    // if mem_ptr is provided, use the caller-supplied memory instead of
+    // allocating new memory from the provider
+    if (mem_ptr) {
+        slab->mem_ptr = mem_ptr;
+    } else {
+        res = umfMemoryProviderAlloc(provider, slab->slab_size, 0,
+                                     &slab->mem_ptr);
+        if (res != UMF_RESULT_SUCCESS) {
+            LOG_ERR("allocation of slab data failed!");
+            goto free_slab;
+        }
     }
 
     // raw allocation is not available for user so mark it as inaccessible
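
`create_slab` now has two modes: adopt caller-supplied memory, or allocate fresh memory from the provider. A minimal standalone sketch of the same adopt-or-allocate idiom, with a hypothetical `block_t` type and `backend_alloc` helper standing in for the UMF internals:

```c
#include <stdlib.h>

typedef struct block_t {
    void *mem_ptr; // start of the block's memory
    size_t size;   // size of the block in bytes
} block_t;

// Hypothetical stand-in for umfMemoryProviderAlloc().
static void *backend_alloc(size_t size) { return malloc(size); }

// Mirrors the create_slab(bucket, mem_ptr) contract above: a NULL mem_ptr
// means "allocate", a non-NULL mem_ptr means "wrap this existing range".
static block_t *block_create(size_t size, void *mem_ptr) {
    block_t *b = malloc(sizeof(*b));
    if (!b) {
        return NULL;
    }
    b->size = size;
    b->mem_ptr = mem_ptr ? mem_ptr : backend_alloc(size);
    if (!b->mem_ptr) {
        free(b);
        return NULL;
    }
    return b;
}
```

One design consequence of this pattern: ownership of adopted memory stays with whoever originally allocated it, so a destroy path has to know which mode created the block (the sketch omits that bookkeeping).
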
@@ -301,6 +305,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
     // pool or freed.
     *to_pool = bucket_can_pool(bucket);
     if (*to_pool == false) {
+
+        // TODO - reuse strategy?
+
         // remove slab
         slab_list_item_t *slab_it = &slab->iter;
         assert(slab_it->val != NULL);
@@ -317,8 +324,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
 }
 
 // NOTE: this function must be called under bucket->bucket_lock
-static void *bucket_get_free_chunk(bucket_t *bucket, bool *from_pool) {
-    slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, from_pool);
+static void *bucket_get_free_chunk(disjoint_pool_t *pool, bucket_t *bucket,
+                                   bool *from_pool) {
+    slab_list_item_t *slab_it = bucket_get_avail_slab(pool, bucket, from_pool);
     if (slab_it == NULL) {
         return NULL;
     }
@@ -342,7 +350,7 @@ static size_t bucket_chunk_cut_off(bucket_t *bucket) {
 }
 
 static slab_t *bucket_create_slab(bucket_t *bucket) {
-    slab_t *slab = create_slab(bucket);
+    slab_t *slab = create_slab(bucket, NULL);
     if (slab == NULL) {
         LOG_ERR("create_slab failed!")
         return NULL;
@@ -362,8 +370,83 @@ static slab_t *bucket_create_slab(bucket_t *bucket) {
     return slab;
 }
 
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
+static slab_list_item_t *bucket_get_avail_slab(disjoint_pool_t *pool,
+                                               bucket_t *bucket,
                                                bool *from_pool) {
+
+    if (bucket->available_slabs == NULL && pool->params.reuse_strategy == 1) {
+        // try to find a completely free slab in a larger bucket
+        for (size_t i = 0; i < pool->buckets_num; i++) {
+            bucket_t *larger_bucket = pool->buckets[i];
+            if (larger_bucket->size < bucket->size) {
+                continue;
+            }
+
+            if (larger_bucket->available_slabs == NULL ||
+                larger_bucket->available_slabs->val->num_chunks_allocated > 0) {
+                continue;
+            }
+
+            if (larger_bucket->size % bucket->size != 0) {
+                continue;
+            }
+
+            // move the available slab from the larger bucket to the smaller one
+            slab_list_item_t *slab_it = larger_bucket->available_slabs;
+            assert(slab_it->val != NULL);
+            DL_DELETE(larger_bucket->available_slabs, slab_it);
+            // TODO check global lock + bucket locks
+            pool_unregister_slab(larger_bucket->pool, slab_it->val);
+            larger_bucket->available_slabs_num--;
+            larger_bucket->chunked_slabs_in_pool--;
+            bucket_update_stats(larger_bucket, 0, -1);
+
+            // split the larger slab's memory into slabs of this bucket's size
+            void *mem_ptr = slab_it->val->mem_ptr;
+            while (mem_ptr < slab_get_end(slab_it->val)) {
+                slab_t *slab = create_slab(bucket, mem_ptr);
+                assert(slab != NULL);
+
+                // register the slab in the pool
+                umf_result_t res = pool_register_slab(bucket->pool, slab);
+                if (res != UMF_RESULT_SUCCESS) {
+                    // TODO handle errors
+                    return NULL;
+                }
+
+                DL_PREPEND(bucket->available_slabs, &slab->iter);
+                bucket->available_slabs_num++;
+                bucket->chunked_slabs_in_pool++;
+                bucket_update_stats(bucket, 0, 1);
+
+                mem_ptr = (void *)((uintptr_t)mem_ptr + slab->slab_size);
+            }
+            // ensure that the whole slab was used
+            assert(mem_ptr == slab_get_end(slab_it->val));
+            umf_ba_global_free(slab_it->val);
+
+            // TODO common code
+            slab_t *slab = bucket->available_slabs->val;
+            // Allocation from an existing slab is treated as from pool for
+            // statistics.
+            *from_pool = true;
+            if (slab->num_chunks_allocated == 0) {
+                assert(bucket->chunked_slabs_in_pool > 0);
+                // If this was an empty slab, it was in the pool.
+                // Now it is no longer in the pool, so update the count.
+                --bucket->chunked_slabs_in_pool;
+                uint64_t size_to_sub = bucket_slab_alloc_size(bucket);
+                uint64_t old_size = utils_fetch_and_sub_u64(
+                    &bucket->shared_limits->total_size, size_to_sub);
+                (void)old_size;
+                assert(old_size >= size_to_sub);
+                bucket_update_stats(bucket, 1, -1);
+            }
+
+            return bucket->available_slabs;
+        }
+    }
+
     if (bucket->available_slabs == NULL) {
         bucket_create_slab(bucket);
         *from_pool = false;
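
The splitting loop above walks the larger slab's memory range in `slab_size` steps, handing each sub-range to `create_slab` in its new adopt mode. A rough self-contained illustration of that pointer arithmetic (hypothetical sizes; `carve` stands in for the loop body):

```c
#include <stdint.h>
#include <stdio.h>

// Walk [base, base + total) in `piece`-sized steps - the same arithmetic
// as the reuse loop above. Assumes total is a multiple of piece, which
// the size-divisibility check above is meant to ensure.
static void carve(void *base, size_t total, size_t piece) {
    void *end = (void *)((uintptr_t)base + total);
    void *mem_ptr = base;
    while (mem_ptr < end) {
        printf("sub-slab at %p, %zu bytes\n", mem_ptr, piece);
        mem_ptr = (void *)((uintptr_t)mem_ptr + piece);
    }
    // the loop terminates exactly at `end`, mirroring the
    // assert(mem_ptr == slab_get_end(...)) above
}

int main(void) {
    char buf[64 * 1024];                // pretend 64 KiB slab from a larger bucket
    carve(buf, sizeof(buf), 16 * 1024); // split into four 16 KiB slabs
    return 0;
}
```
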
@@ -403,10 +486,12 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         return;
     }
 
+    assert(in_use >= 0 || bucket->curr_slabs_in_use >= (size_t)(-in_use));
     bucket->curr_slabs_in_use += in_use;
     bucket->max_slabs_in_use =
         utils_max(bucket->curr_slabs_in_use, bucket->max_slabs_in_use);
 
+    assert(in_pool >= 0 || bucket->curr_slabs_in_pool >= (size_t)(-in_pool));
     bucket->curr_slabs_in_pool += in_pool;
     bucket->max_slabs_in_pool =
         utils_max(bucket->curr_slabs_in_pool, bucket->max_slabs_in_pool);
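
These new assertions guard against unsigned wraparound: the counters are `size_t`, so a decrement larger than the current value would silently wrap to a huge number instead of failing. A tiny sketch of the failure mode being guarded:

```c
#include <stdio.h>
#include <stddef.h>

int main(void) {
    size_t curr = 1;
    int delta = -2; // decrement larger than the current count
    if (!(delta >= 0 || curr >= (size_t)(-delta))) {
        // the condition the new asserts check, caught before the update;
        // without it, curr += delta would wrap to SIZE_MAX
        printf("would wrap: %zu += %d -> %zu\n", curr, delta,
               curr + (size_t)delta);
    }
    return 0;
}
```
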
@@ -542,7 +627,7 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     bool from_pool = false;
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -759,7 +844,7 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
 
     utils_mutex_lock(&bucket->bucket_lock);
 
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -984,6 +1069,7 @@ umfDisjointPoolParamsCreate(umf_disjoint_pool_params_handle_t *hParams) {
         .capacity = 0,
         .min_bucket_size = UMF_DISJOINT_POOL_MIN_BUCKET_DEFAULT_SIZE,
         .cur_pool_size = 0,
+        .reuse_strategy = 0,
         .pool_trace = 0,
         .shared_limits = NULL,
         .name = {*DEFAULT_NAME},
@@ -1056,7 +1142,6 @@ umfDisjointPoolParamsSetMinBucketSize(umf_disjoint_pool_params_handle_t hParams,
     hParams->min_bucket_size = minBucketSize;
     return UMF_RESULT_SUCCESS;
 }
-
 umf_result_t
 umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
                               int poolTrace) {
@@ -1069,6 +1154,18 @@ umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
     return UMF_RESULT_SUCCESS;
 }
 
+umf_result_t
+umfDisjointPoolParamsSetReuseStrategy(umf_disjoint_pool_params_handle_t hParams,
+                                      unsigned int reuseStrategy) {
+    if (!hParams) {
+        LOG_ERR("disjoint pool params handle is NULL");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    hParams->reuse_strategy = reuseStrategy;
+    return UMF_RESULT_SUCCESS;
+}
+
 umf_result_t umfDisjointPoolParamsSetSharedLimits(
     umf_disjoint_pool_params_handle_t hParams,
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits) {
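
For context, a sketch of how a caller might opt into the new strategy once this lands. It assumes the existing UMF public API (`umfPoolCreate`, `umfDisjointPoolOps`, the OS memory provider) plus this PR's setter, with strategy value `1` matching the check in `bucket_get_avail_slab`; error handling is omitted for brevity and this has not been tested against the PR:

```c
#include <umf/memory_pool.h>
#include <umf/memory_provider.h>
#include <umf/pools/pool_disjoint.h>
#include <umf/providers/provider_os_memory.h>

static umf_memory_pool_handle_t create_pool_with_reuse(void) {
    // back the pool with the OS memory provider
    umf_memory_provider_handle_t provider = NULL;
    umf_os_memory_provider_params_handle_t os_params = NULL;
    umfOsMemoryProviderParamsCreate(&os_params);
    umfMemoryProviderCreate(umfOsMemoryProviderOps(), os_params, &provider);
    umfOsMemoryProviderParamsDestroy(os_params);

    umf_disjoint_pool_params_handle_t params = NULL;
    umfDisjointPoolParamsCreate(&params);
    // 0 keeps today's behavior (the default set in umfDisjointPoolParamsCreate);
    // 1 enables the cross-bucket slab reuse added in this PR
    umfDisjointPoolParamsSetReuseStrategy(params, 1);

    umf_memory_pool_handle_t pool = NULL;
    umfPoolCreate(umfDisjointPoolOps(), provider, params, 0, &pool);
    umfDisjointPoolParamsDestroy(params);
    return pool;
}
```
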