@@ -44,7 +44,7 @@ typedef struct mi_arena_s {
   mi_lock_t abandoned_visit_lock;       // lock is only used when abandoned segments are being visited
   _Atomic(size_t) search_idx;           // optimization to start the search for free blocks
   _Atomic(mi_msecs_t) purge_expire;     // expiration time when blocks should be purged from `blocks_purge`.
-
+
   mi_bitmap_field_t* blocks_dirty;      // are the blocks potentially non-zero?
   mi_bitmap_field_t* blocks_committed;  // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t* blocks_purge;      // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
@@ -365,7 +365,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
 static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t* arena_id)
 {
   if (_mi_preloading()) return false;  // use OS only while pre loading
-
+
   const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
   if (arena_count > (MI_MAX_ARENAS - 4)) return false;
@@ -407,7 +407,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset

   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (!mi_option_is_enabled(mi_option_disallow_arena_alloc)) {  // is arena allocation allowed?
-    if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
+    if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
     {
       void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
       if (p != NULL) return p;
@@ -487,7 +487,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks)
     // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
     mi_assert_internal(already_committed < blocks);
     mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
-    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
+    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
   }

   // clear the purged blocks
@@ -556,7 +556,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
 {
   // check pre-conditions
   if (arena->memid.is_pinned) return false;
-
+
   // expired yet?
   mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
   if (!force && (expire == 0 || expire > now)) return false;
@@ -611,7 +611,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
   return any_purged;
 }

-static void mi_arenas_try_purge( bool force, bool visit_all )
+static void mi_arenas_try_purge( bool force, bool visit_all )
 {
   if (_mi_preloading() || mi_arena_purge_delay() <= 0) return;  // nothing will be scheduled
@@ -628,7 +628,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all )
   mi_atomic_guard(&purge_guard)
   {
     // increase global expire: at most one purge per delay cycle
-    mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
+    mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
     size_t max_purge_count = (visit_all ? max_arena : 2);
     bool all_visited = true;
     for (size_t i = 0; i < max_arena; i++) {
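The comment in the hunk above ("at most one purge per delay cycle") describes a rate-limiting pattern: a shared expiration timestamp is pushed forward before the expensive work runs, so concurrent or repeated callers inside one delay window skip it. A minimal standalone sketch of that pattern follows; it is not mimalloc code, and the names (msecs_t, purge_expire, try_expensive_work, delay) are hypothetical stand-ins.

// Illustrative sketch of the "at most one run per delay cycle" pattern.
#include <stdatomic.h>
#include <stdint.h>

typedef int64_t msecs_t;

static _Atomic(msecs_t) purge_expire;   // 0 = nothing scheduled yet

static void try_expensive_work(msecs_t now, msecs_t delay) {
  msecs_t expire = atomic_load_explicit(&purge_expire, memory_order_relaxed);
  if (expire != 0 && expire > now) return;             // still inside the delay window: skip
  atomic_store_explicit(&purge_expire, now + delay,    // schedule the next allowed run
                        memory_order_release);
  // ... do the actual (expensive) work here ...
}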
@@ -947,7 +947,7 @@ void mi_debug_show_arenas(void) mi_attr_noexcept {
   for (size_t i = 0; i < max_arenas; i++) {
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
     if (arena == NULL) break;
-    _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_BLOCK_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
+    _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, (size_t)(MI_ARENA_BLOCK_SIZE / MI_MiB), arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
     if (show_inuse) {
       inuse_total += mi_debug_show_bitmap(" ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count);
     }
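The only non-whitespace change in this commit is the (size_t) cast in the _mi_message call above, presumably to guarantee that the argument passed for the %zu conversion really has type size_t regardless of how the macros expand on a given platform; passing a mismatched integer type through a varargs call for %zu is undefined behavior in C. A minimal sketch of the issue, with hypothetical BLOCK_SIZE and MiB constants standing in for the real macros:

// Illustrative sketch (not part of the commit): why a (size_t) cast before %zu is safe.
#include <stdio.h>

#define BLOCK_SIZE (32 * 1024 * 1024)   // hypothetical: this expression has type int
#define MiB        (1024 * 1024)

int main(void) {
  // printf("%zu\n", BLOCK_SIZE / MiB);          // int argument for %zu: undefined behavior
  printf("%zu\n", (size_t)(BLOCK_SIZE / MiB));   // cast makes the argument a size_t
  return 0;
}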