Commit 349dee0

fix page map resolving for low addresses (issue #1087)
1 parent: 550b628

File tree: 1 file changed (+34, -36 lines)

src/page-map.c

Lines changed: 34 additions & 36 deletions
@@ -177,13 +177,31 @@ mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map;
 static size_t mi_page_map_count;
 static void* mi_page_map_max_address;
 static mi_memid_t mi_page_map_memid;
+
+// divide the main map in 64 (`MI_BFIELD_BITS`) parts and commit those parts on demand
 static _Atomic(mi_bfield_t) mi_page_map_commit;
 
-static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx);
-static mi_page_t** mi_page_map_ensure_committed(size_t idx);
-static mi_page_t** mi_page_map_ensure_at(size_t idx);
-static inline void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count);
+#define MI_PAGE_MAP_ENTRIES_PER_CBIT  (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS)
+
+static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) {
+  mi_bfield_t commit = mi_atomic_load_relaxed(&mi_page_map_commit);
+  const size_t bit_idx = idx/MI_PAGE_MAP_ENTRIES_PER_CBIT;
+  mi_assert_internal(bit_idx < MI_BFIELD_BITS);
+  if (pbit_idx != NULL) { *pbit_idx = bit_idx; }
+  return ((commit & (MI_ZU(1) << bit_idx)) != 0);
+}
+
+static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
+  size_t bit_idx;
+  if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
+    uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
+    _mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL);
+    mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
+  }
+  return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]);  // _mi_page_map_at(idx);
+}
 
+// initialize the page map
 bool _mi_page_map_init(void) {
   size_t vbits = (size_t)mi_option_get_clamp(mi_option_max_vabits, 0, MI_SIZE_BITS);
   if (vbits == 0) {
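
Note: this hunk moves the commit helpers above _mi_page_map_init so the init path can call mi_page_map_ensure_committed directly. The scheme splits the main map into MI_BFIELD_BITS (64) equal parts, each guarded by one bit of mi_page_map_commit, and commits a part only when it is first touched. A minimal standalone sketch of the index arithmetic, using illustrative constants (mimalloc derives the real MI_PAGE_MAP_COUNT from the virtual-address size):

#include <stdio.h>
#include <stddef.h>

// Illustrative stand-ins; not mimalloc's actual values.
#define BFIELD_BITS       64
#define PAGE_MAP_COUNT    (1u << 20)                      // hypothetical entry count
#define ENTRIES_PER_CBIT  (PAGE_MAP_COUNT / BFIELD_BITS)  // entries guarded per commit bit

int main(void) {
  size_t idx = 123456;                      // a main-map index
  size_t bit_idx = idx / ENTRIES_PER_CBIT;  // the commit bit guarding it
  // If bit `bit_idx` is set in the commit bitfield, the 1/64th of the map
  // holding entry `idx` is already committed; otherwise it is committed
  // first, as mi_page_map_ensure_committed does above.
  printf("entry %zu -> commit bit %zu (entries %zu..%zu)\n", idx, bit_idx,
         bit_idx * ENTRIES_PER_CBIT, (bit_idx + 1) * ENTRIES_PER_CBIT - 1);
  return 0;
}
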
@@ -217,24 +235,24 @@ bool _mi_page_map_init(void)
     _mi_warning_message("internal: the page map was committed but not zero initialized!\n");
     _mi_memzero_aligned(_mi_page_map, page_map_size);
   }
-  mi_atomic_store_release(&mi_page_map_commit, (commit ? ~MI_ZU(0) : MI_ZU(0)));
+  mi_atomic_store_release(&mi_page_map_commit, (mi_page_map_memid.initially_committed ? ~MI_ZU(0) : MI_ZU(0)));
 
-  // note: for the NULL range we only commit one OS page (in the map and sub)
-  if (!mi_page_map_memid.initially_committed) {
-    _mi_os_commit(&_mi_page_map[0], os_page_size, NULL);  // commit first part of the map
-  }
-  _mi_page_map[0] = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size);  // we reserved a submap part at the end already
+  // ensure there is a submap for the NULL address
+  mi_page_t** const sub0 = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size);  // we reserved a submap part at the end already
   if (!mi_page_map_memid.initially_committed) {
-    _mi_os_commit(_mi_page_map[0], submap_size, NULL);  // commit full submap (issue #1087)
+    _mi_os_commit(sub0, submap_size, NULL);  // commit full submap (issue #1087)
   }
-  if (!mi_page_map_memid.initially_zero) {  // initialize low addresses with NULL
-    _mi_memzero_aligned(_mi_page_map[0], submap_size);
+  if (!mi_page_map_memid.initially_zero) {  // initialize low addresses with NULL
+    _mi_memzero_aligned(sub0, submap_size);
   }
+  mi_page_map_ensure_committed(0);
+  mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[0], sub0);
 
   mi_assert_internal(_mi_ptr_page(NULL)==NULL);
   return true;
 }
 
+
 void _mi_page_map_unsafe_destroy(void) {
   mi_assert_internal(_mi_page_map != NULL);
   if (_mi_page_map == NULL) return;
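
This hunk carries the actual fix for issue #1087. Previously the init path committed only one OS page for the NULL range and pointed _mi_page_map[0] at the reserved tail submap, so resolving a low (near-NULL) address whose sub-index fell beyond that first OS page could fault on uncommitted memory. Now the whole submap sub0 is committed and zeroed first, and only then published with a release store, pairing with the acquire load in mi_page_map_ensure_committed. A back-of-the-envelope illustration of why one OS page is not enough, with hypothetical sizes (not mimalloc's actual constants):

#include <stdio.h>
#include <stddef.h>

// Hypothetical sizes for illustration; mimalloc's constants differ.
#define OS_PAGE_SIZE  4096
#define SUB_COUNT     (64u * 1024u)   // stand-in for MI_PAGE_MAP_SUB_COUNT

int main(void) {
  size_t entries_per_os_page = OS_PAGE_SIZE / sizeof(void*);  // 512 on 64-bit
  size_t submap_pages = SUB_COUNT / entries_per_os_page;      // pages one submap needs
  // With only the first OS page committed, a low address whose sub-index
  // is >= entries_per_os_page used to resolve into uncommitted memory.
  printf("committed 1 page of the %zu the NULL submap needs\n", submap_pages);
  return 0;
}
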
@@ -258,29 +276,9 @@ void _mi_page_map_unsafe_destroy(void)
 }
 
 
-#define MI_PAGE_MAP_ENTRIES_PER_CBIT  (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS)
-
-static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) {
-  mi_bfield_t commit = mi_atomic_load_relaxed(&mi_page_map_commit);
-  const size_t bit_idx = idx/MI_PAGE_MAP_ENTRIES_PER_CBIT;
-  mi_assert_internal(bit_idx < MI_BFIELD_BITS);
-  if (pbit_idx != NULL) { *pbit_idx = bit_idx; }
-  return ((commit & (MI_ZU(1) << bit_idx)) != 0);
-}
-
-static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
-  size_t bit_idx;
-  if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
-    uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
-    _mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL);
-    mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
-  }
-  return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]);  // _mi_page_map_at(idx);
-}
-
-static mi_page_t** mi_page_map_ensure_at(size_t idx) {
+static mi_page_t** mi_page_map_ensure_submap_at(size_t idx) {
   mi_page_t** sub = mi_page_map_ensure_committed(idx);
-  if mi_unlikely(sub == NULL || idx == 0 /* low addresses */) {
+  if mi_unlikely(sub == NULL) {
     // sub map not yet allocated, alloc now
     mi_memid_t memid;
     mi_page_t** expect = sub;
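
Since init now installs sub0 itself, the idx == 0 special case is obsolete, and the rename (mi_page_map_ensure_at -> mi_page_map_ensure_submap_at) says what the function actually ensures: a submap. The lines following this hunk (not shown here) install a freshly allocated submap with a compare-and-swap so racing threads agree on a single winner. A generic sketch of that install-once idiom in C11 atomics, with calloc/free standing in for mimalloc's own atomic wrappers and OS allocation:

#include <stdatomic.h>
#include <stdlib.h>

// Install-once pattern: allocate a candidate, CAS it into the slot, and
// discard it if another thread won the race.
void* ensure_slot(_Atomic(void*)* slot, size_t size) {
  void* cur = atomic_load_explicit(slot, memory_order_acquire);
  if (cur != NULL) return cur;                  // already installed
  void* fresh = calloc(1, size);                // zeroed candidate
  if (fresh == NULL) return NULL;
  void* expect = NULL;
  if (atomic_compare_exchange_strong_explicit(slot, &expect, fresh,
        memory_order_acq_rel, memory_order_acquire)) {
    return fresh;                               // we won: our submap is published
  }
  free(fresh);                                  // lost: use the winner's submap
  return expect;
}
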
@@ -306,7 +304,7 @@ static mi_page_t** mi_page_map_ensure_at(size_t idx)
 static void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
   // is the page map area that contains the page address committed?
   while (slice_count > 0) {
-    mi_page_t** sub = mi_page_map_ensure_at(idx);
+    mi_page_t** sub = mi_page_map_ensure_submap_at(idx);
     // set the offsets for the page
     while (sub_idx < MI_PAGE_MAP_SUB_COUNT) {
       sub[sub_idx] = page;
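
The last hunk only updates the call site for the rename. For context, mi_page_map_set_range maps slice_count consecutive slices to a page, spilling from one submap into the next; each outer iteration ensures the target submap is committed and allocated before writing. A simplified sketch of such a two-level range write (the type, constant, and callback are illustrative, not mimalloc's API):

#include <stddef.h>

#define SUB_COUNT 4096   // hypothetical entries per submap

typedef struct page_s page_t;

// Write `count` consecutive entries, advancing to the next submap whenever
// sub_idx runs off the end of the current one.
void set_range(page_t** (*ensure_submap)(size_t idx),
               page_t* page, size_t idx, size_t sub_idx, size_t count) {
  while (count > 0) {
    page_t** sub = ensure_submap(idx);   // commit/allocate on demand
    while (sub_idx < SUB_COUNT && count > 0) {
      sub[sub_idx++] = page;             // point this slice at the page
      count--;
    }
    idx++;                               // next main-map entry
    sub_idx = 0;                         // restart at its first entry
  }
}
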
