Commit a20941b

x
1 parent 433fcb1 commit a20941b

7 files changed (+234 -18 lines)

include/umf/pools/pool_disjoint.h

Lines changed: 7 additions & 0 deletions
@@ -98,6 +98,13 @@ umf_result_t umfDisjointPoolParamsSetSharedLimits(
     umf_disjoint_pool_params_handle_t hParams,
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits);
 
+// TODO add comments
+// reuseStrategy - 1 to enable allocation from larger slabs
+// TODO - CTL?
+umf_result_t
+umfDisjointPoolParamsSetReuseStrategy(umf_disjoint_pool_params_handle_t hParams,
+                                      unsigned int reuseStrategy);
+
 /// @brief Set custom name of the disjoint pool to be used in the traces.
 /// @param hParams handle to the parameters of the disjoint pool.
 /// @param name custom name of the pool. Name longer than 64 characters will be truncated.
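
For reference, a minimal caller-side sketch (not part of this commit) of how the new setter composes with the existing disjoint pool params API; the helper name make_reuse_params is hypothetical and error handling is abbreviated:

#include <umf/pools/pool_disjoint.h>

// Hypothetical helper: create params with reuse_strategy == 1 so that small
// buckets may carve their slabs out of empty slabs pooled by larger buckets.
static umf_result_t make_reuse_params(umf_disjoint_pool_params_handle_t *out) {
    umf_result_t res = umfDisjointPoolParamsCreate(out);
    if (res != UMF_RESULT_SUCCESS) {
        return res;
    }

    // 1 enables allocation from larger slabs; 0 (the default) keeps the
    // previous behavior.
    res = umfDisjointPoolParamsSetReuseStrategy(*out, 1);
    if (res != UMF_RESULT_SUCCESS) {
        umfDisjointPoolParamsDestroy(*out);
        return res;
    }
    return UMF_RESULT_SUCCESS;
}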

src/libumf.def

Lines changed: 1 addition & 0 deletions
@@ -139,6 +139,7 @@ EXPORTS
     umfCtlExec
     umfCtlGet
     umfCtlSet
+    umfDisjointPoolParamsSetReuseStrategy
     umfJemallocPoolParamsCreate
     umfJemallocPoolParamsDestroy
     umfJemallocPoolParamsSetNumArenas

src/libumf.map

Lines changed: 1 addition & 0 deletions
@@ -139,6 +139,7 @@ UMF_0.12 {
     umfCtlExec;
     umfCtlGet;
     umfCtlSet;
+    umfDisjointPoolParamsSetReuseStrategy;
     umfJemallocPoolParamsCreate;
     umfJemallocPoolParamsDestroy;
     umfJemallocPoolParamsSetNumArenas;

src/pool/pool_disjoint.c

Lines changed: 114 additions & 17 deletions
@@ -36,8 +36,8 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
-                                               bool *from_pool);
+static slab_list_item_t *
+bucket_get_avail_slab(disjoint_pool_t *pool, bucket_t *bucket, bool *from_pool);
 
 static __TLS umf_result_t TLS_last_allocation_error;
 
@@ -69,7 +69,7 @@ static size_t bucket_slab_alloc_size(bucket_t *bucket) {
     return utils_max(bucket->size, bucket_slab_min_size(bucket));
 }
 
-static slab_t *create_slab(bucket_t *bucket) {
+static slab_t *create_slab(bucket_t *bucket, void *mem_ptr) {
     assert(bucket);
 
     umf_result_t res = UMF_RESULT_SUCCESS;
@@ -110,13 +110,17 @@ static slab_t *create_slab(bucket_t *bucket) {
     // padding at the end of the slab
     slab->slab_size = bucket_slab_alloc_size(bucket);
 
-    // TODO not true
-    // NOTE: originally slabs memory were allocated without alignment
-    // with this registering a slab is simpler and doesn't require multimap
-    res = umfMemoryProviderAlloc(provider, slab->slab_size, 0, &slab->mem_ptr);
-    if (res != UMF_RESULT_SUCCESS) {
-        LOG_ERR("allocation of slab data failed!");
-        goto free_slab;
+    // if the mem_ptr is provided, we use the user-provided memory instead of
+    // allocating a new one
+    if (mem_ptr) {
+        slab->mem_ptr = mem_ptr;
+    } else {
+        res = umfMemoryProviderAlloc(provider, slab->slab_size, 0,
+                                     &slab->mem_ptr);
+        if (res != UMF_RESULT_SUCCESS) {
+            LOG_ERR("allocation of slab data failed!");
+            goto free_slab;
+        }
     }
 
     // raw allocation is not available for user so mark it as inaccessible
@@ -301,6 +305,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
     // pool or freed.
     *to_pool = bucket_can_pool(bucket);
     if (*to_pool == false) {
+
+        // TODO - reuse strategy?
+
         // remove slab
         slab_list_item_t *slab_it = &slab->iter;
         assert(slab_it->val != NULL);
@@ -317,8 +324,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
 }
 
 // NOTE: this function must be called under bucket->bucket_lock
-static void *bucket_get_free_chunk(bucket_t *bucket, bool *from_pool) {
-    slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, from_pool);
+static void *bucket_get_free_chunk(disjoint_pool_t *pool, bucket_t *bucket,
+                                   bool *from_pool) {
+    slab_list_item_t *slab_it = bucket_get_avail_slab(pool, bucket, from_pool);
     if (slab_it == NULL) {
         return NULL;
     }
@@ -342,7 +350,7 @@ static size_t bucket_chunk_cut_off(bucket_t *bucket) {
 }
 
 static slab_t *bucket_create_slab(bucket_t *bucket) {
-    slab_t *slab = create_slab(bucket);
+    slab_t *slab = create_slab(bucket, NULL);
     if (slab == NULL) {
         LOG_ERR("create_slab failed!")
         return NULL;
@@ -362,8 +370,83 @@ static slab_t *bucket_create_slab(bucket_t *bucket) {
     return slab;
 }
 
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
+static slab_list_item_t *bucket_get_avail_slab(disjoint_pool_t *pool,
+                                               bucket_t *bucket,
                                                bool *from_pool) {
+
+    if (bucket->available_slabs == NULL && pool->params.reuse_strategy == 1) {
+        // try to find slabs in larger buckets
+        for (size_t i = 0; i < pool->buckets_num; i++) {
+            bucket_t *larger_bucket = pool->buckets[i];
+            if (larger_bucket->size < bucket->size) {
+                continue;
+            }
+
+            if (larger_bucket->available_slabs == NULL ||
+                larger_bucket->available_slabs->val->num_chunks_allocated > 0) {
+                continue;
+            }
+
+            if (larger_bucket->size % bucket->size != 0) {
+                continue;
+            }
+
+            // move available slab from larger bucket to smaller one
+            slab_list_item_t *slab_it = larger_bucket->available_slabs;
+            assert(slab_it->val != NULL);
+            DL_DELETE(larger_bucket->available_slabs, slab_it);
+            // TODO check global lock + bucket locks
+            pool_unregister_slab(larger_bucket->pool, slab_it->val);
+            larger_bucket->available_slabs_num--;
+            larger_bucket->chunked_slabs_in_pool--;
+            //
+            bucket_update_stats(larger_bucket, 0, -1);
+
+            void *mem_ptr = slab_it->val->mem_ptr;
+            while (mem_ptr < slab_get_end(slab_it->val)) {
+                slab_t *slab = create_slab(bucket, mem_ptr);
+                assert(slab != NULL);
+
+                // register the slab in the pool
+                umf_result_t res = pool_register_slab(bucket->pool, slab);
+                if (res != UMF_RESULT_SUCCESS) {
+                    // TODO handle errors
+                    return NULL;
+                }
+
+                DL_PREPEND(bucket->available_slabs, &slab->iter);
+                bucket->available_slabs_num++;
+                bucket->chunked_slabs_in_pool++;
+                //
+                bucket_update_stats(bucket, 0, 1);
+
+                mem_ptr = (void *)((uintptr_t)mem_ptr + slab->slab_size);
+            }
+            // Ensure that we used the whole slab
+            assert(mem_ptr == slab_get_end(slab_it->val));
+            umf_ba_global_free(slab_it->val);
+
+            // TODO common code
+            slab_t *slab = bucket->available_slabs->val;
+            // Allocation from existing slab is treated as from pool for statistics.
+            *from_pool = true;
+            if (slab->num_chunks_allocated == 0) {
+                assert(bucket->chunked_slabs_in_pool > 0);
+                // If this was an empty slab, it was in the pool.
+                // Now it is no longer in the pool, so update count.
+                --bucket->chunked_slabs_in_pool;
+                uint64_t size_to_sub = bucket_slab_alloc_size(bucket);
+                uint64_t old_size = utils_fetch_and_sub_u64(
+                    &bucket->shared_limits->total_size, size_to_sub);
+                (void)old_size;
+                assert(old_size >= size_to_sub);
+                bucket_update_stats(bucket, 1, -1);
+            }
+
+            return bucket->available_slabs;
+        }
+    }
+
     if (bucket->available_slabs == NULL) {
         bucket_create_slab(bucket);
         *from_pool = false;
@@ -403,10 +486,12 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         return;
     }
 
+    assert(in_use >= 0 || bucket->curr_slabs_in_use >= (size_t)(-in_use));
     bucket->curr_slabs_in_use += in_use;
     bucket->max_slabs_in_use =
         utils_max(bucket->curr_slabs_in_use, bucket->max_slabs_in_use);
 
+    assert(in_pool >= 0 || bucket->curr_slabs_in_pool >= (size_t)(-in_pool));
     bucket->curr_slabs_in_pool += in_pool;
     bucket->max_slabs_in_pool =
         utils_max(bucket->curr_slabs_in_pool, bucket->max_slabs_in_pool);
@@ -542,7 +627,7 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     bool from_pool = false;
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -759,7 +844,7 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
 
     utils_mutex_lock(&bucket->bucket_lock);
 
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -984,6 +1069,7 @@ umfDisjointPoolParamsCreate(umf_disjoint_pool_params_handle_t *hParams) {
         .capacity = 0,
         .min_bucket_size = UMF_DISJOINT_POOL_MIN_BUCKET_DEFAULT_SIZE,
         .cur_pool_size = 0,
+        .reuse_strategy = 0,
         .pool_trace = 0,
         .shared_limits = NULL,
         .name = {*DEFAULT_NAME},
@@ -1056,7 +1142,6 @@ umfDisjointPoolParamsSetMinBucketSize(umf_disjoint_pool_params_handle_t hParams,
     hParams->min_bucket_size = minBucketSize;
     return UMF_RESULT_SUCCESS;
 }
-
 umf_result_t
 umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
                               int poolTrace) {
@@ -1069,6 +1154,18 @@ umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
     return UMF_RESULT_SUCCESS;
 }
 
+umf_result_t
+umfDisjointPoolParamsSetReuseStrategy(umf_disjoint_pool_params_handle_t hParams,
+                                      unsigned int reuseStrategy) {
+    if (!hParams) {
+        LOG_ERR("disjoint pool params handle is NULL");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    hParams->reuse_strategy = reuseStrategy;
+    return UMF_RESULT_SUCCESS;
+}
+
 umf_result_t umfDisjointPoolParamsSetSharedLimits(
     umf_disjoint_pool_params_handle_t hParams,
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits) {
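
The core of the change is the donation path added to bucket_get_avail_slab(): when a bucket has no available slabs and reuse_strategy == 1, it takes a fully empty slab from a larger bucket (whose size is a multiple of the requesting bucket's size), unregisters it, and re-registers the same memory as a run of smaller slabs. Below is a standalone sketch of just the pointer walk; the sizes are illustrative, and it assumes the donated slab size is an exact multiple of the smaller slab size, which the real code tracks through bucket_slab_alloc_size() and slab_get_end():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uintptr_t donated_start = 0x100000; // stands in for the donated slab's mem_ptr
    size_t donated_size = 64 * 1024;    // stands in for its slab_size
    size_t small_slab_size = 4 * 1024;  // stands in for bucket_slab_alloc_size(bucket)

    // assumption of this sketch: the smaller slab size divides the donated size
    assert(donated_size % small_slab_size == 0);

    uintptr_t p = donated_start;
    size_t created = 0;
    while (p < donated_start + donated_size) {
        // each step corresponds to one create_slab(bucket, (void *)p) call
        // followed by pool_register_slab() in the new code
        p += small_slab_size;
        created++;
    }

    // mirrors the "Ensure that we used the whole slab" assert in the commit
    assert(p == donated_start + donated_size);
    printf("donated slab split into %zu smaller slabs\n", created);
    return 0;
}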

src/pool/pool_disjoint_internal.h

Lines changed: 4 additions & 0 deletions
@@ -125,6 +125,10 @@ typedef struct umf_disjoint_pool_params_t {
     // Holds size of the pool managed by the allocator.
     size_t cur_pool_size;
 
+    // Reuse strategy
+    // 1 - reuse larger slabs
+    unsigned int reuse_strategy;
+
     // Whether to print pool usage statistics
     int pool_trace;
 

test/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -233,7 +233,7 @@ add_umf_test(
 add_umf_test(
     NAME disjoint_pool
     SRCS pools/disjoint_pool.cpp malloc_compliance_tests.cpp
-         ${BA_SOURCES_FOR_TEST}
+         ${BA_SOURCES_FOR_TEST} ${UMF_CMAKE_SOURCE_DIR}/src/critnib/critnib.c
     LIBS ${UMF_UTILS_FOR_TEST})
 
 add_umf_test(

test/pools/disjoint_pool.cpp

Lines changed: 106 additions & 0 deletions
@@ -134,6 +134,112 @@ TEST_F(test, internals) {
     umfDisjointPoolParamsDestroy(params);
 }
 
+TEST_F(test, internals_reuse) {
+    static umf_result_t expectedResult = UMF_RESULT_SUCCESS;
+    struct memory_provider : public umf_test::provider_base_t {
+        umf_result_t alloc(size_t size, size_t alignment, void **ptr) noexcept {
+            *ptr = umf_ba_global_aligned_alloc(size, alignment);
+            return UMF_RESULT_SUCCESS;
+        }
+
+        umf_result_t free(void *ptr, [[maybe_unused]] size_t size) noexcept {
+            // do the actual free only when we expect the success
+            if (expectedResult == UMF_RESULT_SUCCESS) {
+                umf_ba_global_free(ptr);
+            }
+            return expectedResult;
+        }
+
+        umf_result_t
+        get_min_page_size([[maybe_unused]] const void *ptr,
+                          [[maybe_unused]] size_t *pageSize) noexcept {
+            *pageSize = 1024;
+            return UMF_RESULT_SUCCESS;
+        }
+    };
+    umf_memory_provider_ops_t provider_ops =
+        umf_test::providerMakeCOps<memory_provider, void>();
+
+    auto providerUnique =
+        wrapProviderUnique(createProviderChecked(&provider_ops, nullptr));
+
+    umf_memory_provider_handle_t provider_handle;
+    provider_handle = providerUnique.get();
+
+    umf_disjoint_pool_params_handle_t params =
+        (umf_disjoint_pool_params_handle_t)defaultDisjointPoolConfig();
+
+    // set to maximum tracing
+    params->pool_trace = 3;
+    params->max_poolable_size = 1024 * 1024;
+    params->capacity = 4;
+    params->reuse_strategy = 1;
+
+    // in "internals" test we use ops interface to directly manipulate the pool
+    // structure
+    const umf_memory_pool_ops_t *ops = umfDisjointPoolOps();
+    EXPECT_NE(ops, nullptr);
+
+    disjoint_pool_t *pool;
+    umf_result_t res = ops->initialize(provider_handle, params, (void **)&pool);
+    EXPECT_EQ(res, UMF_RESULT_SUCCESS);
+    EXPECT_NE(pool, nullptr);
+    EXPECT_EQ(pool->provider_min_page_size, 1024);
+
+    // allocate large object, free, then allocate small object and check if
+    // it is allocated from the same slab
+    size_t large_size = 1024;
+    void *ptr = ops->malloc(pool, large_size);
+    EXPECT_NE(ptr, nullptr);
+
+    // get slab and bucket
+    slab_t *large_slab =
+        (slab_t *)critnib_find_le(pool->known_slabs, (uintptr_t)ptr);
+    EXPECT_NE(large_slab, nullptr);
+    bucket_t *large_bucket = large_slab->bucket;
+    EXPECT_EQ(large_bucket->size, large_size);
+
+    // we got 1 slab in use and 0 whole empty slabs available in the pool
+    EXPECT_EQ(large_bucket->curr_slabs_in_use, 1);
+    EXPECT_EQ(large_bucket->curr_slabs_in_pool, 0);
+
+    ops->free(pool, ptr);
+    EXPECT_EQ(large_bucket->available_slabs_num, 1);
+    EXPECT_EQ(large_bucket->curr_slabs_in_use, 0);
+    EXPECT_EQ(large_bucket->curr_slabs_in_pool, 1);
+
+    size_t small_size = 64;
+    ptr = ops->malloc(pool, small_size);
+    EXPECT_NE(ptr, nullptr);
+
+    // we should reuse the slab from the large bucket
+    EXPECT_EQ(large_bucket->available_slabs_num, 0);
+    EXPECT_EQ(large_bucket->curr_slabs_in_use, 0);
+    EXPECT_EQ(large_bucket->curr_slabs_in_pool, 0);
+
+    // get slab and bucket
+    slab_t *small_slab =
+        (slab_t *)critnib_find_le(pool->known_slabs, (uintptr_t)ptr);
+    EXPECT_NE(small_slab, nullptr);
+    bucket_t *small_bucket = small_slab->bucket;
+    EXPECT_EQ(small_bucket->size, small_size);
+    EXPECT_EQ(small_bucket->available_slabs_num, 1);
+    EXPECT_EQ(small_bucket->curr_slabs_in_use, 1);
+    EXPECT_EQ(small_bucket->curr_slabs_in_pool, 0);
+
+    // check if small object is allocated from the same memory as large
+    EXPECT_EQ(large_slab->mem_ptr, small_slab->mem_ptr);
+
+    // check that the whole large slab was divided into correct number of small
+    // chunks
+    EXPECT_EQ(small_slab->num_chunks_total,
+              large_size / small_size * large_slab->num_chunks_total);
+
+    // cleanup
+    ops->finalize(pool);
+    umfDisjointPoolParamsDestroy(params);
+}
+
 TEST_F(test, freeErrorPropagation) {
     static umf_result_t expectedResult = UMF_RESULT_SUCCESS;
     struct memory_provider : public umf_test::provider_base_t {
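
As a quick sanity check of the last expectation in the new test: with large_size = 1024 and small_size = 64, re-chunking the same memory at the smaller size should yield 1024 / 64 = 16 times as many chunks per slab, which is the ratio the final EXPECT_EQ encodes. A one-line illustration of that arithmetic (numbers taken from the test above):

// 1024-byte chunks re-chunked at 64 bytes give 16 chunks apiece.
_Static_assert(1024 / 64 == 16, "large_size / small_size ratio used by the test");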
