@@ -36,8 +36,8 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
-                                               bool *from_pool);
+static slab_list_item_t *
+bucket_get_avail_slab(disjoint_pool_t *pool, bucket_t *bucket, bool *from_pool);
 
 static __TLS umf_result_t TLS_last_allocation_error;
 
@@ -69,7 +69,7 @@ static size_t bucket_slab_alloc_size(bucket_t *bucket) {
     return utils_max(bucket->size, bucket_slab_min_size(bucket));
 }
 
-static slab_t *create_slab(bucket_t *bucket) {
+static slab_t *create_slab(bucket_t *bucket, void *mem_ptr) {
     assert(bucket);
 
     umf_result_t res = UMF_RESULT_SUCCESS;
@@ -110,13 +110,17 @@ static slab_t *create_slab(bucket_t *bucket) {
     // padding at the end of the slab
     slab->slab_size = bucket_slab_alloc_size(bucket);
 
-    // TODO not true
-    // NOTE: originally slabs memory were allocated without alignment
-    // with this registering a slab is simpler and doesn't require multimap
-    res = umfMemoryProviderAlloc(provider, slab->slab_size, 0, &slab->mem_ptr);
-    if (res != UMF_RESULT_SUCCESS) {
-        LOG_ERR("allocation of slab data failed!");
-        goto free_slab;
+    // if the mem_ptr is provided, we use the user-provided memory instead of
+    // allocating a new one
+    if (mem_ptr) {
+        slab->mem_ptr = mem_ptr;
+    } else {
+        res = umfMemoryProviderAlloc(provider, slab->slab_size, 0,
+                                     &slab->mem_ptr);
+        if (res != UMF_RESULT_SUCCESS) {
+            LOG_ERR("allocation of slab data failed!");
+            goto free_slab;
+        }
     }
 
     // raw allocation is not available for user so mark it as inaccessible
@@ -301,6 +305,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
     // pool or freed.
     *to_pool = bucket_can_pool(bucket);
     if (*to_pool == false) {
+
+        // TODO - reuse strategy?
+
         // remove slab
         slab_list_item_t *slab_it = &slab->iter;
         assert(slab_it->val != NULL);
@@ -317,8 +324,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
 }
 
 // NOTE: this function must be called under bucket->bucket_lock
-static void *bucket_get_free_chunk(bucket_t *bucket, bool *from_pool) {
-    slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, from_pool);
+static void *bucket_get_free_chunk(disjoint_pool_t *pool, bucket_t *bucket,
+                                   bool *from_pool) {
+    slab_list_item_t *slab_it = bucket_get_avail_slab(pool, bucket, from_pool);
     if (slab_it == NULL) {
         return NULL;
     }
@@ -342,7 +350,7 @@ static size_t bucket_chunk_cut_off(bucket_t *bucket) {
 }
 
 static slab_t *bucket_create_slab(bucket_t *bucket) {
-    slab_t *slab = create_slab(bucket);
+    slab_t *slab = create_slab(bucket, NULL);
     if (slab == NULL) {
         LOG_ERR("create_slab failed!")
         return NULL;
@@ -362,8 +370,84 @@ static slab_t *bucket_create_slab(bucket_t *bucket) {
     return slab;
 }
 
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
+static slab_list_item_t *bucket_get_avail_slab(disjoint_pool_t *pool,
+                                               bucket_t *bucket,
                                                bool *from_pool) {
+
+    if (bucket->available_slabs == NULL && pool->params.reuse_strategy == 1) {
+        // try to find slabs in larger buckets
+        for (size_t i = 0; i < pool->buckets_num; i++) {
+            bucket_t *larger_bucket = pool->buckets[i];
+            if (larger_bucket->size < bucket->size) {
+                continue;
+            }
+
+            if (larger_bucket->available_slabs == NULL ||
+                larger_bucket->available_slabs->val->num_chunks_allocated > 0) {
+                continue;
+            }
+
+            if (larger_bucket->size % bucket->size != 0) {
+                // TODO what about this case?
+                continue;
+            }
+
+            // move available slab from larger bucket to smaller one
+            slab_list_item_t *slab_it = larger_bucket->available_slabs;
+            assert(slab_it->val != NULL);
+            DL_DELETE(larger_bucket->available_slabs, slab_it);
+            // TODO check global lock + bucket locks
+            pool_unregister_slab(larger_bucket->pool, slab_it->val);
+            larger_bucket->available_slabs_num--;
+            larger_bucket->chunked_slabs_in_pool--;
+            //
+            bucket_update_stats(larger_bucket, 0, -1);
+
+            void *mem_ptr = slab_it->val->mem_ptr;
+            while (mem_ptr < slab_get_end(slab_it->val)) {
+                slab_t *slab = create_slab(bucket, mem_ptr);
+                assert(slab != NULL);
+
+                // register the slab in the pool
+                umf_result_t res = pool_register_slab(bucket->pool, slab);
+                if (res != UMF_RESULT_SUCCESS) {
+                    // TODO handle errors
+                    return NULL;
+                }
+
+                DL_PREPEND(bucket->available_slabs, &slab->iter);
+                bucket->available_slabs_num++;
+                bucket->chunked_slabs_in_pool++;
+                //
+                bucket_update_stats(bucket, 0, 1);
+
+                mem_ptr = (void *)((uintptr_t)mem_ptr + slab->slab_size);
+            }
+            // Ensure that we used the whole slab
+            assert(mem_ptr == slab_get_end(slab_it->val));
+            umf_ba_global_free(slab_it->val);
+
+            // TODO common code
+            slab_t *slab = bucket->available_slabs->val;
+            // Allocation from existing slab is treated as from pool for statistics.
+            *from_pool = true;
+            if (slab->num_chunks_allocated == 0) {
+                assert(bucket->chunked_slabs_in_pool > 0);
+                // If this was an empty slab, it was in the pool.
+                // Now it is no longer in the pool, so update count.
+                --bucket->chunked_slabs_in_pool;
+                uint64_t size_to_sub = bucket_slab_alloc_size(bucket);
+                uint64_t old_size = utils_fetch_and_sub_u64(
+                    &bucket->shared_limits->total_size, size_to_sub);
+                (void)old_size;
+                assert(old_size >= size_to_sub);
+                bucket_update_stats(bucket, 1, -1);
+            }
+
+            return bucket->available_slabs;
+        }
+    }
+
     if (bucket->available_slabs == NULL) {
         bucket_create_slab(bucket);
         *from_pool = false;
@@ -403,10 +487,12 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         return;
     }
 
+    assert(in_use >= 0 || bucket->curr_slabs_in_use >= (size_t)(-in_use));
     bucket->curr_slabs_in_use += in_use;
     bucket->max_slabs_in_use =
         utils_max(bucket->curr_slabs_in_use, bucket->max_slabs_in_use);
 
+    assert(in_pool >= 0 || bucket->curr_slabs_in_pool >= (size_t)(-in_pool));
     bucket->curr_slabs_in_pool += in_pool;
     bucket->max_slabs_in_pool =
         utils_max(bucket->curr_slabs_in_pool, bucket->max_slabs_in_pool);
@@ -542,7 +628,7 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     bool from_pool = false;
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -759,7 +845,7 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
 
     utils_mutex_lock(&bucket->bucket_lock);
 
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -984,6 +1070,7 @@ umfDisjointPoolParamsCreate(umf_disjoint_pool_params_handle_t *hParams) {
         .capacity = 0,
         .min_bucket_size = UMF_DISJOINT_POOL_MIN_BUCKET_DEFAULT_SIZE,
         .cur_pool_size = 0,
+        .reuse_strategy = 0,
         .pool_trace = 0,
         .shared_limits = NULL,
         .name = {*DEFAULT_NAME},
@@ -1056,7 +1143,6 @@ umfDisjointPoolParamsSetMinBucketSize(umf_disjoint_pool_params_handle_t hParams,
     hParams->min_bucket_size = minBucketSize;
     return UMF_RESULT_SUCCESS;
 }
-
 umf_result_t
 umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
                               int poolTrace) {
@@ -1069,6 +1155,18 @@ umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
     return UMF_RESULT_SUCCESS;
 }
 
+umf_result_t
+umfDisjointPoolParamsSetReuseStrategy(umf_disjoint_pool_params_handle_t hParams,
+                                      unsigned int reuseStrategy) {
+    if (!hParams) {
+        LOG_ERR("disjoint pool params handle is NULL");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    hParams->reuse_strategy = reuseStrategy;
+    return UMF_RESULT_SUCCESS;
+}
+
 umf_result_t umfDisjointPoolParamsSetSharedLimits(
     umf_disjoint_pool_params_handle_t hParams,
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits) {
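For reference, a minimal usage sketch (not part of this patch) of the new setter. It assumes the existing disjoint pool params API from `<umf/pools/pool_disjoint.h>` (`umfDisjointPoolParamsCreate`, `umfDisjointPoolParamsDestroy`); only `umfDisjointPoolParamsSetReuseStrategy` comes from this change, and the value `1` selects the slab-splitting reuse path checked in `bucket_get_avail_slab`. The helper name `create_params_with_reuse` is hypothetical.

```c
#include <umf/pools/pool_disjoint.h>

// Sketch: build disjoint pool params with the new reuse strategy enabled.
// reuse_strategy == 0 keeps the default behavior; 1 lets a bucket with no
// available slabs split an empty slab taken from a larger bucket.
static umf_result_t
create_params_with_reuse(umf_disjoint_pool_params_handle_t *out) {
    umf_disjoint_pool_params_handle_t params = NULL;
    umf_result_t res = umfDisjointPoolParamsCreate(&params);
    if (res != UMF_RESULT_SUCCESS) {
        return res;
    }

    // New setter introduced by this patch.
    res = umfDisjointPoolParamsSetReuseStrategy(params, 1);
    if (res != UMF_RESULT_SUCCESS) {
        umfDisjointPoolParamsDestroy(params);
        return res;
    }

    *out = params;
    return UMF_RESULT_SUCCESS;
}
```

The resulting params handle would then be passed to pool creation exactly as before; the strategy only changes how `bucket_get_avail_slab` looks for memory once a bucket runs out of available slabs.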