@@ -25,22 +25,29 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // Fast allocation in a page: just pop from the free list.
 // Fall back to generic allocation only if the list is empty.
-extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
   mi_assert_internal(page->xblock_size == 0 || mi_page_block_size(page) >= size);
   mi_block_t* const block = page->free;
   if (mi_unlikely(block == NULL)) {
-    return _mi_malloc_generic(heap, size);
+    return _mi_malloc_generic(heap, size, zero);
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
   // pop from the free list
   page->used++;
   page->free = mi_block_next(page, block);
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
 
+  // zero the block?
+  if (mi_unlikely(zero)) {
+    mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks
+    const size_t zsize = (mi_unlikely(page->is_zero) ? sizeof(block->next) : page->xblock_size);
+    _mi_memzero_aligned(block, zsize);
+  }
+
 #if (MI_DEBUG>0)
-  if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
+  if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size); }
 #elif (MI_SECURE!=0)
-  block->next = 0; // don't leak internal data
+  if (!zero) { block->next = 0; } // don't leak internal data
 #endif
 
 #if (MI_STAT>0)
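
The heart of the change is above: `_mi_page_malloc` now takes a `zero` flag and clears the block as part of the fast path itself. When the page is known to hold freshly zeroed memory (`page->is_zero`), only the free-list `next` pointer written into the block needs clearing; otherwise the full `xblock_size` bytes are zeroed. A minimal standalone sketch of that conditional-zeroing idea, with illustrative names rather than mimalloc's internal API:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef struct block_s { struct block_s* next; } block_t;

/* Zero a block just popped from a page's free list. If the page memory
   is already known to be zero, only the link word we wrote into the
   block needs clearing; otherwise zero the whole block. */
static void zero_block(block_t* block, bool page_is_zero, size_t block_size) {
  const size_t zsize = page_is_zero ? sizeof(block->next) : block_size;
  memset(block, 0, zsize);
}
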
@@ -69,41 +76,45 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   return block;
 }
 
-// allocate a small block
-mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
-  mi_assert(heap != NULL);
+static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+  mi_assert(heap != NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
   mi_assert(size <= MI_SMALL_SIZE_MAX);
-#if (MI_PADDING)
+  #if (MI_PADDING)
   if (size == 0) {
     size = sizeof(void*);
   }
-#endif
-  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
-  void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
-  mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
-#if MI_STAT>1
+  #endif
+  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
+  void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+  mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
+  #if MI_STAT>1
   if (p != NULL) {
     if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
-#endif
+  #endif
   return p;
 }
 
+// allocate a small block
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+  return mi_heap_malloc_small_zero(heap, size, false);
+}
+
 mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
   return mi_heap_malloc_small(mi_get_default_heap(), size);
 }
 
 // The main allocation function
-mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
   if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
-    return mi_heap_malloc_small(heap, size);
+    return mi_heap_malloc_small_zero(heap, size, zero);
   }
   else {
     mi_assert(heap != NULL);
-    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id());  // heaps are thread local
-    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);       // note: size can overflow but it is detected in malloc_generic
+    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero); // note: size can overflow but it is detected in malloc_generic
     mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
     #if MI_STAT>1
     if (p != NULL) {
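
This hunk funnels the small and zero-initialized entry points into a single worker, `mi_heap_malloc_small_zero`, which threads the `zero` flag down to `_mi_page_malloc`; `mi_heap_malloc_small` and `_mi_heap_malloc_zero` become thin wrappers that pin the flag to a compile-time constant, so once the wrappers are inlined the branch on `zero` can be folded away. The same pattern in miniature, with hypothetical names sketched over plain malloc rather than mimalloc's internals:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* One worker carries the zero flag... */
static void* alloc_zero(size_t size, bool zero) {
  void* p = malloc(size);
  if (zero && p != NULL) { memset(p, 0, size); }  // zero only on request
  return p;
}
/* ...and the public entry points pin it to a constant. */
void* my_malloc(size_t size) { return alloc_zero(size, false); }
void* my_zalloc(size_t size) { return alloc_zero(size, true); }
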
@@ -115,44 +126,17 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t*
   }
 }
 
-mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
-  return mi_heap_malloc(mi_get_default_heap(), size);
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+  return _mi_heap_malloc_zero(heap, size, false);
 }
 
-
-void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
-  // note: we need to initialize the whole usable block size to zero, not just the requested size,
-  // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
-  MI_UNUSED(size);
-  mi_assert_internal(p != NULL);
-  mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
-  mi_assert_internal(_mi_ptr_page(p) == page);
-  if (page->is_zero && size > sizeof(mi_block_t)) {
-    // already zero initialized memory
-    ((mi_block_t*)p)->next = 0; // clear the free list pointer
-    mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
-  }
-  else {
-    // otherwise memset
-    memset(p, 0, mi_usable_size(p));
-  }
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
+  return mi_heap_malloc(mi_get_default_heap(), size);
 }
 
 // zero initialized small block
 mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
-  void* p = mi_malloc_small(size);
-  if (p != NULL) {
-    _mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
-  }
-  return p;
-}
-
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
-  void* p = mi_heap_malloc(heap, size);
-  if (zero && p != NULL) {
-    _mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
-  }
-  return p;
+  return mi_heap_malloc_small_zero(mi_get_default_heap(), size, true);
 }
 
 mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
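
With zeroing handled inside the allocation path itself, the old `_mi_block_zero_init` helper disappears along with its `todo: can we avoid getting the page again?` lookups: `mi_zalloc_small` no longer allocates first and then re-derives the page via `_mi_ptr_page` just to clear the block. The caller-visible contract is unchanged; a small sanity check using the public API (assumes the mimalloc.h header):

#include <mimalloc.h>
#include <assert.h>
#include <stddef.h>

int main(void) {
  unsigned char* p = (unsigned char*)mi_zalloc_small(64);
  if (p != NULL) {
    for (size_t i = 0; i < 64; i++) { assert(p[i] == 0); }  // fully zeroed
    mi_free(p);
  }
  return 0;
}
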
@@ -564,6 +548,7 @@ mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
 #ifdef __cplusplus
 void* _mi_externs[] = {
   (void*)&_mi_page_malloc,
+  (void*)&_mi_heap_malloc_zero,
   (void*)&mi_malloc,
   (void*)&mi_malloc_small,
   (void*)&mi_zalloc_small,
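
Adding `_mi_heap_malloc_zero` to `_mi_externs` mirrors the existing entry for `_mi_page_malloc`: a plausible reading of this table is that taking the address of each `extern inline` function odr-uses it in the C++ build, forcing the compiler to emit an out-of-line definition that other translation units can link against. The mechanism in miniature (illustrative names):

/* Taking the address forces an out-of-line definition to be emitted. */
extern inline int twice(int x) { return 2 * x; }
void* force_definition[] = { (void*)&twice };
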