
Commit eb25f15 (1 parent: 433fcb1)
Commit message: x

7 files changed: +235 -18 lines

include/umf/pools/pool_disjoint.h

Lines changed: 7 additions & 0 deletions
@@ -98,6 +98,13 @@ umf_result_t umfDisjointPoolParamsSetSharedLimits(
     umf_disjoint_pool_params_handle_t hParams,
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits);
 
+// TODO add comments
+// reuseStrategy - 1 to enable allocation from larger slabs
+// TODO - CTL?
+umf_result_t
+umfDisjointPoolParamsSetReuseStrategy(umf_disjoint_pool_params_handle_t hParams,
+                                      unsigned int reuseStrategy);
+
 /// @brief Set custom name of the disjoint pool to be used in the traces.
 /// @param hParams handle to the parameters of the disjoint pool.
 /// @param name custom name of the pool. Name longer than 64 characters will be truncated.
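For orientation (not part of the commit): the new setter composes with the existing public UMF API in the usual way. A minimal sketch, assuming a valid provider handle and the already-exported umfDisjointPoolParamsCreate/Destroy, umfDisjointPoolOps, and umfPoolCreate calls; the helper name create_pool_with_reuse is hypothetical:

#include <umf/memory_pool.h>
#include <umf/pools/pool_disjoint.h>

// Hypothetical helper: create a disjoint pool with reuse strategy 1
// (allocation from completely free slabs of larger buckets) enabled.
static umf_result_t
create_pool_with_reuse(umf_memory_provider_handle_t provider,
                       umf_memory_pool_handle_t *out_pool) {
    umf_disjoint_pool_params_handle_t params = NULL;
    umf_result_t res = umfDisjointPoolParamsCreate(&params);
    if (res != UMF_RESULT_SUCCESS) {
        return res;
    }

    // 1 - enable allocation from larger slabs (the only strategy this
    // commit defines; 0 leaves the current behavior unchanged)
    res = umfDisjointPoolParamsSetReuseStrategy(params, 1);
    if (res == UMF_RESULT_SUCCESS) {
        res = umfPoolCreate(umfDisjointPoolOps(), provider, params, 0, out_pool);
    }

    umfDisjointPoolParamsDestroy(params);
    return res;
}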

src/libumf.def

Lines changed: 1 addition & 0 deletions
@@ -139,6 +139,7 @@ EXPORTS
     umfCtlExec
     umfCtlGet
     umfCtlSet
+    umfDisjointPoolParamsSetReuseStrategy
     umfJemallocPoolParamsCreate
     umfJemallocPoolParamsDestroy
     umfJemallocPoolParamsSetNumArenas

src/libumf.map

Lines changed: 1 addition & 0 deletions
@@ -139,6 +139,7 @@ UMF_0.12 {
     umfCtlExec;
     umfCtlGet;
     umfCtlSet;
+    umfDisjointPoolParamsSetReuseStrategy;
     umfJemallocPoolParamsCreate;
     umfJemallocPoolParamsDestroy;
     umfJemallocPoolParamsSetNumArenas;

src/pool/pool_disjoint.c

Lines changed: 115 additions & 17 deletions
@@ -36,8 +36,8 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
-                                               bool *from_pool);
+static slab_list_item_t *
+bucket_get_avail_slab(disjoint_pool_t *pool, bucket_t *bucket, bool *from_pool);
 
 static __TLS umf_result_t TLS_last_allocation_error;
 
@@ -69,7 +69,7 @@ static size_t bucket_slab_alloc_size(bucket_t *bucket) {
     return utils_max(bucket->size, bucket_slab_min_size(bucket));
 }
 
-static slab_t *create_slab(bucket_t *bucket) {
+static slab_t *create_slab(bucket_t *bucket, void *mem_ptr) {
     assert(bucket);
 
     umf_result_t res = UMF_RESULT_SUCCESS;
@@ -110,13 +110,17 @@ static slab_t *create_slab(bucket_t *bucket) {
     // padding at the end of the slab
     slab->slab_size = bucket_slab_alloc_size(bucket);
 
-    // TODO not true
-    // NOTE: originally slabs memory were allocated without alignment
-    // with this registering a slab is simpler and doesn't require multimap
-    res = umfMemoryProviderAlloc(provider, slab->slab_size, 0, &slab->mem_ptr);
-    if (res != UMF_RESULT_SUCCESS) {
-        LOG_ERR("allocation of slab data failed!");
-        goto free_slab;
+    // if the mem_ptr is provided, we use the user-provided memory instead of
+    // allocating a new one
+    if (mem_ptr) {
+        slab->mem_ptr = mem_ptr;
+    } else {
+        res = umfMemoryProviderAlloc(provider, slab->slab_size, 0,
+                                     &slab->mem_ptr);
+        if (res != UMF_RESULT_SUCCESS) {
+            LOG_ERR("allocation of slab data failed!");
+            goto free_slab;
+        }
     }
 
     // raw allocation is not available for user so mark it as inaccessible
@@ -301,6 +305,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
     // pool or freed.
     *to_pool = bucket_can_pool(bucket);
     if (*to_pool == false) {
+
+        // TODO - reuse strategy?
+
         // remove slab
         slab_list_item_t *slab_it = &slab->iter;
         assert(slab_it->val != NULL);
@@ -317,8 +324,9 @@
 }
 
 // NOTE: this function must be called under bucket->bucket_lock
-static void *bucket_get_free_chunk(bucket_t *bucket, bool *from_pool) {
-    slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, from_pool);
+static void *bucket_get_free_chunk(disjoint_pool_t *pool, bucket_t *bucket,
+                                   bool *from_pool) {
+    slab_list_item_t *slab_it = bucket_get_avail_slab(pool, bucket, from_pool);
     if (slab_it == NULL) {
         return NULL;
     }
@@ -342,7 +350,7 @@ static size_t bucket_chunk_cut_off(bucket_t *bucket) {
 }
 
 static slab_t *bucket_create_slab(bucket_t *bucket) {
-    slab_t *slab = create_slab(bucket);
+    slab_t *slab = create_slab(bucket, NULL);
     if (slab == NULL) {
         LOG_ERR("create_slab failed!")
         return NULL;
@@ -362,8 +370,84 @@ static slab_t *bucket_create_slab(bucket_t *bucket) {
     return slab;
 }
 
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
+static slab_list_item_t *bucket_get_avail_slab(disjoint_pool_t *pool,
+                                               bucket_t *bucket,
                                                bool *from_pool) {
+
+    if (bucket->available_slabs == NULL && pool->params.reuse_strategy == 1) {
+        // try to find slabs in larger buckets
+        for (size_t i = 0; i < pool->buckets_num; i++) {
+            bucket_t *larger_bucket = pool->buckets[i];
+            if (larger_bucket->size < bucket->size) {
+                continue;
+            }
+
+            if (larger_bucket->available_slabs == NULL ||
+                larger_bucket->available_slabs->val->num_chunks_allocated > 0) {
+                continue;
+            }
+
+            if (larger_bucket->size % bucket->size != 0) {
+                // TODO what about this case?
+                continue;
+            }
+
+            // move available slab from larger bucket to smaller one
+            slab_list_item_t *slab_it = larger_bucket->available_slabs;
+            assert(slab_it->val != NULL);
+            DL_DELETE(larger_bucket->available_slabs, slab_it);
+            // TODO check global lock + bucket locks
+            pool_unregister_slab(larger_bucket->pool, slab_it->val);
+            larger_bucket->available_slabs_num--;
+            larger_bucket->chunked_slabs_in_pool--;
+            //
+            bucket_update_stats(larger_bucket, 0, -1);
+
+            void *mem_ptr = slab_it->val->mem_ptr;
+            while (mem_ptr < slab_get_end(slab_it->val)) {
+                slab_t *slab = create_slab(bucket, mem_ptr);
+                assert(slab != NULL);
+
+                // register the slab in the pool
+                umf_result_t res = pool_register_slab(bucket->pool, slab);
+                if (res != UMF_RESULT_SUCCESS) {
+                    // TODO handle errors
+                    return NULL;
+                }
+
+                DL_PREPEND(bucket->available_slabs, &slab->iter);
+                bucket->available_slabs_num++;
+                bucket->chunked_slabs_in_pool++;
+                //
+                bucket_update_stats(bucket, 0, 1);
+
+                mem_ptr = (void *)((uintptr_t)mem_ptr + slab->slab_size);
+            }
+            // Ensure that we used the whole slab
+            assert(mem_ptr == slab_get_end(slab_it->val));
+            umf_ba_global_free(slab_it->val);
+
+            // TODO common code
+            slab_t *slab = bucket->available_slabs->val;
+            // Allocation from existing slab is treated as from pool for statistics.
+            *from_pool = true;
+            if (slab->num_chunks_allocated == 0) {
+                assert(bucket->chunked_slabs_in_pool > 0);
+                // If this was an empty slab, it was in the pool.
+                // Now it is no longer in the pool, so update count.
+                --bucket->chunked_slabs_in_pool;
+                uint64_t size_to_sub = bucket_slab_alloc_size(bucket);
+                uint64_t old_size = utils_fetch_and_sub_u64(
+                    &bucket->shared_limits->total_size, size_to_sub);
+                (void)old_size;
+                assert(old_size >= size_to_sub);
+                bucket_update_stats(bucket, 1, -1);
+            }
+
+            return bucket->available_slabs;
+        }
+    }
+
     if (bucket->available_slabs == NULL) {
         bucket_create_slab(bucket);
         *from_pool = false;
@@ -403,10 +487,12 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         return;
     }
 
+    assert(in_use >= 0 || bucket->curr_slabs_in_use >= (size_t)(-in_use));
     bucket->curr_slabs_in_use += in_use;
     bucket->max_slabs_in_use =
         utils_max(bucket->curr_slabs_in_use, bucket->max_slabs_in_use);
 
+    assert(in_pool >= 0 || bucket->curr_slabs_in_pool >= (size_t)(-in_pool));
     bucket->curr_slabs_in_pool += in_pool;
     bucket->max_slabs_in_pool =
        utils_max(bucket->curr_slabs_in_pool, bucket->max_slabs_in_pool);
@@ -542,7 +628,7 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     bool from_pool = false;
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -759,7 +845,7 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
 
     utils_mutex_lock(&bucket->bucket_lock);
 
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -984,6 +1070,7 @@ umfDisjointPoolParamsCreate(umf_disjoint_pool_params_handle_t *hParams) {
         .capacity = 0,
         .min_bucket_size = UMF_DISJOINT_POOL_MIN_BUCKET_DEFAULT_SIZE,
         .cur_pool_size = 0,
+        .reuse_strategy = 0,
         .pool_trace = 0,
         .shared_limits = NULL,
         .name = {*DEFAULT_NAME},
@@ -1056,7 +1143,6 @@ umfDisjointPoolParamsSetMinBucketSize(umf_disjoint_pool_params_handle_t hParams,
     hParams->min_bucket_size = minBucketSize;
     return UMF_RESULT_SUCCESS;
 }
-
 umf_result_t
 umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
                               int poolTrace) {
@@ -1069,6 +1155,18 @@ umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
     return UMF_RESULT_SUCCESS;
 }
 
+umf_result_t
+umfDisjointPoolParamsSetReuseStrategy(umf_disjoint_pool_params_handle_t hParams,
+                                      unsigned int reuseStrategy) {
+    if (!hParams) {
+        LOG_ERR("disjoint pool params handle is NULL");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    hParams->reuse_strategy = reuseStrategy;
+    return UMF_RESULT_SUCCESS;
+}
+
 umf_result_t umfDisjointPoolParamsSetSharedLimits(
     umf_disjoint_pool_params_handle_t hParams,
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits);
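In short, the new branch in bucket_get_avail_slab() fires when a bucket has no available slabs and reuse_strategy == 1: it scans for a larger bucket whose head slab is completely free (num_chunks_allocated == 0) and whose size is an exact multiple of the requesting bucket's size, unregisters that slab, and carves its memory into several smaller slabs. A standalone toy model of the carving arithmetic follows; the sizes are illustrative only, and the real code derives slab sizes from bucket_slab_alloc_size() and registers each carved slab via pool_register_slab():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    // Illustrative numbers: a donated 64 KiB slab re-cut for a bucket
    // whose slabs are 8 KiB each.
    uintptr_t base = 0x100000;          // start of the donated slab memory
    size_t large_slab_size = 64 * 1024; // slab_size of the donated slab
    size_t small_slab_size = 8 * 1024;  // slab_size of the requesting bucket

    // mirrors the (larger_bucket->size % bucket->size != 0) guard above
    assert(large_slab_size % small_slab_size == 0);

    size_t carved = 0;
    for (uintptr_t p = base; p < base + large_slab_size; p += small_slab_size) {
        // real code: create_slab(bucket, (void *)p) + pool_register_slab()
        carved++;
    }
    printf("carved %zu small slabs\n", carved); // prints: carved 8 small slabs
    return 0;
}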

src/pool/pool_disjoint_internal.h

Lines changed: 4 additions & 0 deletions
@@ -125,6 +125,10 @@ typedef struct umf_disjoint_pool_params_t {
     // Holds size of the pool managed by the allocator.
     size_t cur_pool_size;
 
+    // Reuse strategy
+    // 1 - reuse larger slabs
+    unsigned int reuse_strategy;
+
     // Whether to print pool usage statistics
     int pool_trace;
 

test/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -233,7 +233,7 @@ add_umf_test(
 add_umf_test(
     NAME disjoint_pool
     SRCS pools/disjoint_pool.cpp malloc_compliance_tests.cpp
-         ${BA_SOURCES_FOR_TEST}
+         ${BA_SOURCES_FOR_TEST} ${UMF_CMAKE_SOURCE_DIR}/src/critnib/critnib.c
     LIBS ${UMF_UTILS_FOR_TEST})
 
 add_umf_test(

test/pools/disjoint_pool.cpp

Lines changed: 106 additions & 0 deletions
@@ -134,6 +134,112 @@ TEST_F(test, internals) {
     umfDisjointPoolParamsDestroy(params);
 }
 
+TEST_F(test, internals_reuse) {
+    static umf_result_t expectedResult = UMF_RESULT_SUCCESS;
+    struct memory_provider : public umf_test::provider_base_t {
+        umf_result_t alloc(size_t size, size_t alignment, void **ptr) noexcept {
+            *ptr = umf_ba_global_aligned_alloc(size, alignment);
+            return UMF_RESULT_SUCCESS;
+        }
+
+        umf_result_t free(void *ptr, [[maybe_unused]] size_t size) noexcept {
+            // do the actual free only when we expect the success
+            if (expectedResult == UMF_RESULT_SUCCESS) {
+                umf_ba_global_free(ptr);
+            }
+            return expectedResult;
+        }
+
+        umf_result_t
+        get_min_page_size([[maybe_unused]] const void *ptr,
+                          [[maybe_unused]] size_t *pageSize) noexcept {
+            *pageSize = 1024;
+            return UMF_RESULT_SUCCESS;
+        }
+    };
+    umf_memory_provider_ops_t provider_ops =
+        umf_test::providerMakeCOps<memory_provider, void>();
+
+    auto providerUnique =
+        wrapProviderUnique(createProviderChecked(&provider_ops, nullptr));
+
+    umf_memory_provider_handle_t provider_handle;
+    provider_handle = providerUnique.get();
+
+    umf_disjoint_pool_params_handle_t params =
+        (umf_disjoint_pool_params_handle_t)defaultDisjointPoolConfig();
+
+    // set to maximum tracing
+    params->pool_trace = 3;
+    params->max_poolable_size = 1024 * 1024;
+    params->capacity = 4;
+    params->reuse_strategy = 1;
+
+    // in "internals" test we use ops interface to directly manipulate the pool
+    // structure
+    const umf_memory_pool_ops_t *ops = umfDisjointPoolOps();
+    EXPECT_NE(ops, nullptr);
+
+    disjoint_pool_t *pool;
+    umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool);
+    EXPECT_EQ(res, UMF_RESULT_SUCCESS);
+    EXPECT_NE(pool, nullptr);
+    EXPECT_EQ(pool->provider_min_page_size, 1024);
+
+    // allocate large object, free, then allocate small object and check if
+    // it is allocated from the same slab
+    size_t large_size = 1024;
+    void *ptr = ops->malloc(pool, large_size);
+    EXPECT_NE(ptr, nullptr);
+
+    // get slab and bucket
+    slab_t *large_slab =
+        (slab_t *)critnib_find_le(pool->known_slabs, (uintptr_t)ptr);
+    EXPECT_NE(large_slab, nullptr);
+    bucket_t *large_bucket = large_slab->bucket;
+    EXPECT_EQ(large_bucket->size, large_size);
+
+    // there is 1 slab in use and 0 completely free slabs available in the pool
+    EXPECT_EQ(large_bucket->curr_slabs_in_use, 1);
+    EXPECT_EQ(large_bucket->curr_slabs_in_pool, 0);
+
+    ops->free(pool, ptr);
+    EXPECT_EQ(large_bucket->available_slabs_num, 1);
+    EXPECT_EQ(large_bucket->curr_slabs_in_use, 0);
+    EXPECT_EQ(large_bucket->curr_slabs_in_pool, 1);
+
+    size_t small_size = 64;
+    ptr = ops->malloc(pool, small_size);
+    EXPECT_NE(ptr, nullptr);
+
+    // we should reuse the slab from the large bucket
+    EXPECT_EQ(large_bucket->available_slabs_num, 0);
+    EXPECT_EQ(large_bucket->curr_slabs_in_use, 0);
+    EXPECT_EQ(large_bucket->curr_slabs_in_pool, 0);
+
+    // get slab and bucket
+    slab_t *small_slab =
+        (slab_t *)critnib_find_le(pool->known_slabs, (uintptr_t)ptr);
+    EXPECT_NE(small_slab, nullptr);
+    bucket_t *small_bucket = small_slab->bucket;
+    EXPECT_EQ(small_bucket->size, small_size);
+    EXPECT_EQ(small_bucket->available_slabs_num, 1);
+    EXPECT_EQ(small_bucket->curr_slabs_in_use, 1);
+    EXPECT_EQ(small_bucket->curr_slabs_in_pool, 0);
+
+    // check if small object is allocated from the same memory as large
+    EXPECT_EQ(large_slab->mem_ptr, small_slab->mem_ptr);
+
+    // check that the whole large slab was divided into correct number of small
+    // chunks
+    EXPECT_EQ(small_slab->num_chunks_total,
+              large_size / small_size * large_slab->num_chunks_total);
+
+    // cleanup
+    ops->finalize(pool);
+    umfDisjointPoolParamsDestroy(params);
+}
+
 TEST_F(test, freeErrorPropagation) {
     static umf_result_t expectedResult = UMF_RESULT_SUCCESS;
     struct memory_provider : public umf_test::provider_base_t {
