Skip to content

Commit 7c7ecf0

Browse files
committed
ensure use of subproc_main during unsafe destroy to avoid dereferencing the heap
1 parent a238d2b commit 7c7ecf0

File tree

5 files changed

+28
-24
lines changed

5 files changed

+28
-24
lines changed

include/mimalloc/internal.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ void _mi_os_init(void); // c
135135
void* _mi_os_alloc(size_t size, mi_memid_t* memid);
136136
void* _mi_os_zalloc(size_t size, mi_memid_t* memid);
137137
void _mi_os_free(void* p, size_t size, mi_memid_t memid);
138-
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
138+
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_subproc_t* subproc );
139139

140140
size_t _mi_os_page_size(void);
141141
size_t _mi_os_guard_page_size(void);
@@ -200,7 +200,7 @@ void _mi_page_map_register(mi_page_t* page);
200200
void _mi_page_map_unregister(mi_page_t* page);
201201
void _mi_page_map_unregister_range(void* start, size_t size);
202202
mi_page_t* _mi_safe_ptr_page(const void* p);
203-
void _mi_page_map_unsafe_destroy(void);
203+
void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc);
204204

205205
// "page.c"
206206
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;

src/arena.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1074,6 +1074,7 @@ bool _mi_arenas_contain(const void* p) {
10741074
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
10751075
// for dynamic libraries that are unloaded and need to release all their allocated memory.
10761076
static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
1077+
mi_assert_internal(subproc != NULL);
10771078
const size_t max_arena = mi_arenas_get_count(subproc);
10781079
size_t new_max_arena = 0;
10791080
for (size_t i = 0; i < max_arena; i++) {
@@ -1082,7 +1083,7 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
10821083
// mi_lock_done(&arena->abandoned_visit_lock);
10831084
mi_atomic_store_ptr_release(mi_arena_t, &subproc->arenas[i], NULL);
10841085
if (mi_memkind_is_os(arena->memid.memkind)) {
1085-
_mi_os_free(mi_arena_start(arena), mi_arena_size(arena), arena->memid);
1086+
_mi_os_free_ex(mi_arena_start(arena), mi_arena_size(arena), true, arena->memid, subproc); // pass `subproc` to avoid accessing the heap pointer (in `_mi_subproc()`)
10861087
}
10871088
}
10881089
}
@@ -1096,7 +1097,7 @@ static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
10961097
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
10971098
// for dynamic libraries that are unloaded and need to release all their allocated memory.
10981099
void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld) {
1099-
mi_arenas_unsafe_destroy(_mi_subproc());
1100+
mi_arenas_unsafe_destroy(tld->subproc);
11001101
_mi_arenas_collect(true /* force purge */, true /* visit all*/, tld); // purge non-owned arenas
11011102
}
11021103

@@ -1304,7 +1305,7 @@ static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool com
13041305
void* start = _mi_os_alloc_aligned(size, MI_ARENA_SLICE_ALIGN, commit, allow_large, &memid);
13051306
if (start == NULL) return ENOMEM;
13061307
if (!mi_manage_os_memory_ex2(subproc, start, size, -1 /* numa node */, exclusive, memid, NULL, NULL, arena_id)) {
1307-
_mi_os_free_ex(start, size, commit, memid);
1308+
_mi_os_free_ex(start, size, commit, memid, NULL);
13081309
_mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
13091310
return ENOMEM;
13101311
}

src/init.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -779,8 +779,9 @@ void mi_cdecl _mi_process_done(void) {
779779
mi_heap_collect(heap, true /* force */);
780780
_mi_heap_unsafe_destroy_all(heap); // forcefully release all memory held by all heaps (of this thread only!)
781781
_mi_arenas_unsafe_destroy_all(heap->tld);
782-
_mi_page_map_unsafe_destroy();
782+
_mi_page_map_unsafe_destroy(_mi_subproc_main());
783783
}
784+
//_mi_page_map_unsafe_destroy(_mi_subproc_main());
784785

785786
if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
786787
mi_stats_print(NULL);

src/os.c

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -168,22 +168,23 @@ bool _mi_os_secure_guard_page_reset_before(void* addr, mi_memid_t memid) {
168168
Free memory
169169
-------------------------------------------------------------- */
170170

171-
static void mi_os_free_huge_os_pages(void* p, size_t size);
171+
static void mi_os_free_huge_os_pages(void* p, size_t size, mi_subproc_t* subproc);
172172

173-
static void mi_os_prim_free(void* addr, size_t size, size_t commit_size) {
173+
static void mi_os_prim_free(void* addr, size_t size, size_t commit_size, mi_subproc_t* subproc) {
174174
mi_assert_internal((size % _mi_os_page_size()) == 0);
175175
if (addr == NULL) return; // || _mi_os_is_huge_reserved(addr)
176176
int err = _mi_prim_free(addr, size); // allow size==0 (issue #1041)
177177
if (err != 0) {
178178
_mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
179179
}
180+
if (subproc == NULL) { subproc = _mi_subproc(); } // from `mi_arenas_unsafe_destroy` we pass subproc_main explicitly as we can no longer use the heap pointer
180181
if (commit_size > 0) {
181-
mi_os_stat_decrease(committed, commit_size);
182+
mi_subproc_stat_decrease(subproc, committed, commit_size);
182183
}
183-
mi_os_stat_decrease(reserved, size);
184+
mi_subproc_stat_decrease(subproc, reserved, size);
184185
}
185186

186-
void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
187+
void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_subproc_t* subproc /* can be NULL */) {
187188
if (mi_memkind_is_os(memid.memkind)) {
188189
size_t csize = memid.mem.os.size;
189190
if (csize==0) { csize = _mi_os_good_alloc_size(size); }
@@ -204,10 +205,10 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
204205
// free it
205206
if (memid.memkind == MI_MEM_OS_HUGE) {
206207
mi_assert(memid.is_pinned);
207-
mi_os_free_huge_os_pages(base, csize);
208+
mi_os_free_huge_os_pages(base, csize, subproc);
208209
}
209210
else {
210-
mi_os_prim_free(base, csize, (still_committed ? commit_size : 0));
211+
mi_os_prim_free(base, csize, (still_committed ? commit_size : 0), subproc);
211212
}
212213
}
213214
else {
@@ -217,7 +218,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
217218
}
218219

219220
void _mi_os_free(void* p, size_t size, mi_memid_t memid) {
220-
_mi_os_free_ex(p, size, true, memid);
221+
_mi_os_free_ex(p, size, true, memid, NULL);
221222
}
222223

223224

@@ -292,7 +293,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
292293
_mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
293294
}
294295
#endif
295-
if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0)); }
296+
if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0), NULL); }
296297
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
297298
const size_t over_size = size + alignment;
298299

@@ -310,7 +311,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
310311
// explicitly commit only the aligned part
311312
if (commit) {
312313
if (!_mi_os_commit(p, size, NULL)) {
313-
mi_os_prim_free(*base, over_size, 0);
314+
mi_os_prim_free(*base, over_size, 0, NULL);
314315
return NULL;
315316
}
316317
}
@@ -326,8 +327,8 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
326327
size_t mid_size = _mi_align_up(size, _mi_os_page_size());
327328
size_t post_size = over_size - pre_size - mid_size;
328329
mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size);
329-
if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0)); }
330-
if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0)); }
330+
if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0), NULL); }
331+
if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0), NULL); }
331332
// we can return the aligned pointer on `mmap` systems
332333
p = aligned_p;
333334
*base = aligned_p; // since we freed the pre part, `*base == p`.
@@ -668,7 +669,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
668669
// no success, issue a warning and break
669670
if (p != NULL) {
670671
_mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
671-
mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
672+
mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE, NULL);
672673
}
673674
break;
674675
}
@@ -710,11 +711,11 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
710711

711712
// free every huge page in a range individually (as we allocated per page)
712713
// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
713-
static void mi_os_free_huge_os_pages(void* p, size_t size) {
714+
static void mi_os_free_huge_os_pages(void* p, size_t size, mi_subproc_t* subproc) {
714715
if (p==NULL || size==0) return;
715716
uint8_t* base = (uint8_t*)p;
716717
while (size >= MI_HUGE_OS_PAGE_SIZE) {
717-
mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
718+
mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE, subproc);
718719
size -= MI_HUGE_OS_PAGE_SIZE;
719720
base += MI_HUGE_OS_PAGE_SIZE;
720721
}

src/page-map.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -265,7 +265,8 @@ bool _mi_page_map_init(void) {
265265
}
266266

267267

268-
void _mi_page_map_unsafe_destroy(void) {
268+
void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc) {
269+
mi_assert_internal(subproc != NULL);
269270
mi_assert_internal(_mi_page_map != NULL);
270271
if (_mi_page_map == NULL) return;
271272
for (size_t idx = 1; idx < mi_page_map_count; idx++) { // skip entry 0 (as we allocate that submap at the end of the page_map)
@@ -274,12 +275,12 @@ void _mi_page_map_unsafe_destroy(void) {
274275
mi_page_t** sub = _mi_page_map_at(idx);
275276
if (sub != NULL) {
276277
mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_SIZE, true, false, false);
277-
_mi_os_free(memid.mem.os.base, memid.mem.os.size, memid);
278+
_mi_os_free_ex(memid.mem.os.base, memid.mem.os.size, true, memid, subproc);
278279
mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[idx], NULL);
279280
}
280281
}
281282
}
282-
_mi_os_free(_mi_page_map, mi_page_map_memid.mem.os.size, mi_page_map_memid);
283+
_mi_os_free_ex(_mi_page_map, mi_page_map_memid.mem.os.size, true, mi_page_map_memid, subproc);
283284
_mi_page_map = NULL;
284285
mi_page_map_count = 0;
285286
mi_page_map_memid = _mi_memid_none();

0 commit comments

Comments
 (0)