
Commit 695e597

More advanced emergency allocator from GCC 5.1
1 parent 6ca3eff commit 695e597

File tree

2 files changed: +331 / -27 lines


patches/gcc4.8/gcc-exception-arena.patch

Lines changed: 0 additions & 27 deletions
This file was deleted.
Lines changed: 331 additions & 0 deletions
@@ -0,0 +1,331 @@

Modified from https://github.com/gcc-mirror/gcc/commit/ba0b98683c4e8453c35dd17c581ea59755665bc0.patch

diff --git a/libstdc++-v3/libsupc++/eh_alloc.cc b/libstdc++-v3/libsupc++/eh_alloc.cc
index 9b3fb4d..5be017c 100644
--- a/libstdc++-v3/libsupc++/eh_alloc.cc
+++ b/libstdc++-v3/libsupc++/eh_alloc.cc
@@ -34,6 +34,7 @@
#include <exception>
#include "unwind-cxx.h"
#include <ext/concurrence.h>
+#include <new>

#if _GLIBCXX_HOSTED
using std::free;
@@ -56,6 +57,11 @@ using namespace __cxxabiv1;
// so on a system with PSImode pointers we're talking about 56 bytes
// just for overhead.

+#ifdef ESP8266
+# define EMERGENCY_OBJ_SIZE 48
+# define EMERGENCY_OBJ_COUNT 4
+#else
+
#if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE 128
# define EMERGENCY_OBJ_COUNT 16
@@ -72,28 +78,175 @@ using namespace __cxxabiv1;
# define EMERGENCY_OBJ_COUNT 4
#endif

-#if INT_MAX == 32767 || EMERGENCY_OBJ_COUNT <= 32
-typedef unsigned int bitmask_type;
-#else
-#if defined (_GLIBCXX_LLP64)
-typedef unsigned long long bitmask_type;
-#else
-typedef unsigned long bitmask_type;
-#endif
#endif

+namespace
+{
+ // A fixed-size heap, variable size object allocator
+ class pool
+ {
+ public:
+ pool();
+
+ void *allocate (std::size_t);
+ void free (void *);
+
+ bool in_pool (void *);
+
+ private:
+ struct free_entry {
+ std::size_t size;
+ free_entry *next;
+ };
+ struct allocated_entry {
+ std::size_t size;
+ char data[];
+ };
+
+ // A single mutex controlling emergency allocations.
+ __gnu_cxx::__mutex emergency_mutex;
+
+ // The free-list
+ free_entry *first_free_entry;
+ // The arena itself - we need to keep track of these only
+ // to implement in_pool.
+ char *arena;
+ std::size_t arena_size;
+ };
+
+ pool::pool()
+ {
+ // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
+ // to make this tunable.
+ arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
+ + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
+ arena = (char *)malloc (arena_size);
+ if (!arena)
+ {
+ // If the allocation failed go without an emergency pool.
+ arena_size = 0;
+ first_free_entry = NULL;
+ return;
+ }
+
+ // Populate the free-list with a single entry covering the whole arena
+ first_free_entry = reinterpret_cast <free_entry *> (arena);
+ new (first_free_entry) free_entry;
+ first_free_entry->size = arena_size;
+ first_free_entry->next = NULL;
+ }

-typedef char one_buffer[EMERGENCY_OBJ_SIZE] __attribute__((aligned));
-static one_buffer emergency_buffer[EMERGENCY_OBJ_COUNT];
-static bitmask_type emergency_used;
+ void *pool::allocate (std::size_t size)
+ {
+ __gnu_cxx::__scoped_lock sentry(emergency_mutex);
+ // We need an additional size_t member.
+ size += sizeof (std::size_t);
+ // And we need to at least hand out objects of the size of
+ // a freelist entry.
+ if (size < sizeof (free_entry))
+ size = sizeof (free_entry);
+ // And we need to align objects we hand out to the required
+ // alignment of a freelist entry (this really aligns the
+ // tail which will become a new freelist entry).
+ size = ((size + __alignof__(free_entry) - 1)
+ & ~(__alignof__(free_entry) - 1));
+ // Search for an entry of proper size on the freelist.
+ free_entry **e;
+ for (e = &first_free_entry;
+ *e && (*e)->size < size;
+ e = &(*e)->next)
+ ;
+ if (!*e)
+ return NULL;
+ allocated_entry *x;
+ if ((*e)->size - size >= sizeof (free_entry))
+ {
+ // Slit block if it is too large.
+ free_entry *f = reinterpret_cast <free_entry *>
+ (reinterpret_cast <char *> (*e) + size);
+ std::size_t sz = (*e)->size;
+ free_entry *next = (*e)->next;
+ new (f) free_entry;
+ f->next = next;
+ f->size = sz - size;
+ x = reinterpret_cast <allocated_entry *> (*e);
+ new (x) allocated_entry;
+ x->size = size;
+ *e = f;
+ }
+ else
+ {
+ // Exact size match or too small overhead for a free entry.
+ std::size_t sz = (*e)->size;
+ free_entry *next = (*e)->next;
+ x = reinterpret_cast <allocated_entry *> (*e);
+ new (x) allocated_entry;
+ x->size = sz;
+ *e = next;
+ }
+ return &x->data;
+ }

-static __cxa_dependent_exception dependents_buffer[EMERGENCY_OBJ_COUNT];
-static bitmask_type dependents_used;
+ void pool::free (void *data)
+ {
+ __gnu_cxx::__scoped_lock sentry(emergency_mutex);
+ allocated_entry *e = reinterpret_cast <allocated_entry *>
+ (reinterpret_cast <char *> (data) - sizeof (std::size_t));
+ std::size_t sz = e->size;
+ if (!first_free_entry)
+ {
+ // If the free list is empty just put the entry there.
+ free_entry *f = reinterpret_cast <free_entry *> (e);
+ new (f) free_entry;
+ f->size = sz;
+ f->next = NULL;
+ first_free_entry = f;
+ }
+ else if (reinterpret_cast <char *> (e) + sz
+ == reinterpret_cast <char *> (first_free_entry))
+ {
+ // Check if we can merge with the first free entry being right
+ // after us.
+ free_entry *f = reinterpret_cast <free_entry *> (e);
+ new (f) free_entry;
+ f->size = sz + first_free_entry->size;
+ f->next = first_free_entry->next;
+ first_free_entry = f;
+ }
+ else
+ {
+ // Else search for a free item we can merge with at its end.
+ free_entry **fe;
+ for (fe = &first_free_entry;
+ (*fe)->next
+ && (reinterpret_cast <char *> ((*fe)->next)
+ > reinterpret_cast <char *> (e) + sz);
+ fe = &(*fe)->next)
+ ;
+ if (reinterpret_cast <char *> (*fe) + (*fe)->size
+ == reinterpret_cast <char *> (e))
+ /* Merge with the freelist entry. */
+ (*fe)->size += sz;
+ else
+ {
+ // Else put it after it which keeps the freelist sorted.
+ free_entry *f = reinterpret_cast <free_entry *> (e);
+ new (f) free_entry;
+ f->size = sz;
+ f->next = (*fe)->next;
+ (*fe)->next = f;
+ }
+ }
+ }

-namespace
-{
- // A single mutex controlling emergency allocations.
- __gnu_cxx::__mutex emergency_mutex;
+ bool pool::in_pool (void *ptr)
+ {
+ char *p = reinterpret_cast <char *> (ptr);
+ return (p > arena
+ && p < arena + arena_size);
+ }
+
+ pool emergency_pool;
}

extern "C" void *
@@ -104,30 +257,11 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
thrown_size += sizeof (__cxa_refcounted_exception);
ret = malloc (thrown_size);

- if (! ret)
- {
- __gnu_cxx::__scoped_lock sentry(emergency_mutex);
-
- bitmask_type used = emergency_used;
- unsigned int which = 0;
-
- if (thrown_size > EMERGENCY_OBJ_SIZE)
- goto failed;
- while (used & 1)
- {
- used >>= 1;
- if (++which >= EMERGENCY_OBJ_COUNT)
- goto failed;
- }
-
- emergency_used |= (bitmask_type)1 << which;
- ret = &emergency_buffer[which][0];
-
- failed:;
+ if (!ret)
+ ret = emergency_pool.allocate (thrown_size);

- if (!ret)
- std::terminate ();
- }
+ if (!ret)
+ std::terminate ();

// We have an uncaught exception as soon as we allocate memory. This
// yields uncaught_exception() true during the copy-constructor that
@@ -144,19 +278,11 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
extern "C" void
__cxxabiv1::__cxa_free_exception(void *vptr) _GLIBCXX_NOTHROW
{
- char *base = (char *) emergency_buffer;
- char *ptr = (char *) vptr;
- if (ptr >= base
- && ptr < base + sizeof (emergency_buffer))
- {
- const unsigned int which
- = (unsigned) (ptr - base) / EMERGENCY_OBJ_SIZE;
-
- __gnu_cxx::__scoped_lock sentry(emergency_mutex);
- emergency_used &= ~((bitmask_type)1 << which);
- }
+ char *ptr = (char *) vptr - sizeof (__cxa_refcounted_exception);
+ if (emergency_pool.in_pool (ptr))
+ emergency_pool.free (ptr);
else
- free (ptr - sizeof (__cxa_refcounted_exception));
+ free (ptr);
}


@@ -169,27 +295,11 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
(malloc (sizeof (__cxa_dependent_exception)));

if (!ret)
- {
- __gnu_cxx::__scoped_lock sentry(emergency_mutex);
-
- bitmask_type used = dependents_used;
- unsigned int which = 0;
-
- while (used & 1)
- {
- used >>= 1;
- if (++which >= EMERGENCY_OBJ_COUNT)
- goto failed;
- }
-
- dependents_used |= (bitmask_type)1 << which;
- ret = &dependents_buffer[which];
+ ret = static_cast <__cxa_dependent_exception*>
+ (emergency_pool.allocate (sizeof (__cxa_dependent_exception)));

- failed:;
-
- if (!ret)
- std::terminate ();
- }
+ if (!ret)
+ std::terminate ();

// We have an uncaught exception as soon as we allocate memory. This
// yields uncaught_exception() true during the copy-constructor that
@@ -207,17 +317,8 @@ extern "C" void
__cxxabiv1::__cxa_free_dependent_exception
(__cxa_dependent_exception *vptr) _GLIBCXX_NOTHROW
{
- char *base = (char *) dependents_buffer;
- char *ptr = (char *) vptr;
- if (ptr >= base
- && ptr < base + sizeof (dependents_buffer))
- {
- const unsigned int which
- = (unsigned) (ptr - base) / sizeof (__cxa_dependent_exception);
-
- __gnu_cxx::__scoped_lock sentry(emergency_mutex);
- dependents_used &= ~((bitmask_type)1 << which);
- }
+ if (emergency_pool.in_pool (vptr))
+ emergency_pool.free (vptr);
else
free (vptr);
}
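
Editorial aside: the sketch below is a minimal, self-contained illustration of the first-fit free-list technique the new pool class uses; it is not the patched libstdc++ code. The demo_pool name, the 256-byte arena, and the sizes in main are made up for illustration, and the sketch deliberately omits the real pool's locking (emergency_mutex), its coalescing free() path, and in_pool().

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical demonstration type; the real allocator lives in eh_alloc.cc.
class demo_pool
{
public:
  explicit demo_pool(std::size_t bytes)
  {
    char *arena = static_cast<char *>(std::malloc(bytes));
    if (arena)
      {
        // One free-list entry covering the whole arena, as in pool::pool().
        first_free = reinterpret_cast<free_entry *>(arena);
        first_free->size = bytes;
        first_free->next = nullptr;
      }
  }

  void *allocate(std::size_t size)
  {
    // Reserve room for a size header and round up to free_entry alignment,
    // mirroring the bookkeeping in pool::allocate.
    size += sizeof(std::size_t);
    if (size < sizeof(free_entry))
      size = sizeof(free_entry);
    size = (size + alignof(free_entry) - 1) & ~(alignof(free_entry) - 1);

    for (free_entry **e = &first_free; *e; e = &(*e)->next)
      {
        if ((*e)->size < size)
          continue;                       // first fit: keep searching
        char *block = reinterpret_cast<char *>(*e);
        if ((*e)->size - size >= sizeof(free_entry))
          {
            // Split: the tail of the block stays on the free list.
            free_entry *tail = reinterpret_cast<free_entry *>(block + size);
            tail->size = (*e)->size - size;
            tail->next = (*e)->next;
            *e = tail;
          }
        else
          {
            // Too little left over for a free_entry: hand out the whole block.
            size = (*e)->size;
            *e = (*e)->next;
          }
        *reinterpret_cast<std::size_t *>(block) = size;   // size header
        return block + sizeof(std::size_t);
      }
    return nullptr;                       // pool exhausted
  }

private:
  struct free_entry { std::size_t size; free_entry *next; };
  free_entry *first_free = nullptr;
};

int main()
{
  demo_pool p(256);
  void *a = p.allocate(48);    // fits; splits the single free block
  void *b = p.allocate(200);   // no block large enough remains; returns nullptr
  std::printf("a=%p b=%p\n", a, b);
  return 0;
}

In the patched __cxa_allocate_exception above, the equivalent allocate call is attempted only after malloc fails, and std::terminate() is reached only when the emergency pool is exhausted as well.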

0 commit comments
