@@ -31,9 +31,8 @@ void upb_Arena_SetMaxBlockSize(size_t max) {
 
 typedef struct upb_MemBlock {
   struct upb_MemBlock* next;
-  // If this block is the head of the list, tracks a growing hint of what the
-  // *next* block should be; otherwise tracks the size of the actual allocation.
-  size_t size_or_hint;
+  // Size of the actual allocation.
+  size_t size;
   // Data follows.
 } upb_MemBlock;
 
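After this change, every `upb_MemBlock` header records exactly one thing: the byte size of its own backing allocation, with usable data starting at a fixed reserve offset past the header. A minimal standalone sketch of that layout (illustrative only; `RESERVE` stands in for upb's `kUpb_MemblockReserve`, and the helpers are hypothetical):

```c
#include <stddef.h>

typedef struct MemBlock {
  struct MemBlock* next;  // singly linked list of blocks
  size_t size;            // exact size of the allocation, header included
  // Data follows.
} MemBlock;

enum { RESERVE = sizeof(MemBlock) };  // stand-in for kUpb_MemblockReserve

// Usable region of a block: everything past the reserved header.
static char* block_data(MemBlock* b) { return (char*)b + RESERVE; }
static size_t block_capacity(const MemBlock* b) { return b->size - RESERVE; }
```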
@@ -46,6 +45,12 @@ typedef struct upb_ArenaInternal {
   // freed in an arena.
   upb_AllocCleanupFunc* upb_alloc_cleanup;
 
+  // Linked list of blocks to free/cleanup.
+  upb_MemBlock* blocks;
+
+  // A growing hint of how large the *next* block should be.
+  size_t size_hint;
+
   // When multiple arenas are fused together, each arena points to a parent
   // arena (root points to itself). The root tracks how many live arenas
   // reference it.
@@ -68,9 +73,6 @@ typedef struct upb_ArenaInternal {
   // such that a->previous_or_tail->next == a.
   UPB_ATOMIC(uintptr_t) previous_or_tail;
 
-  // Linked list of blocks to free/cleanup.
-  upb_MemBlock* blocks;
-
   // Total space allocated in blocks, atomic only for SpaceAllocated
   UPB_ATOMIC(uintptr_t) space_allocated;
 
@@ -102,12 +104,6 @@ static upb_ArenaInternal* upb_Arena_Internal(const upb_Arena* a) {
   return &((upb_ArenaState*)a)->body;
 }
 
-// Extracts the (upb_Arena*) from a (upb_ArenaInternal*)
-static upb_Arena* upb_Arena_FromInternal(const upb_ArenaInternal* ai) {
-  ptrdiff_t offset = -offsetof(upb_ArenaState, body);
-  return UPB_PTR_AT(ai, offset, upb_Arena);
-}
-
 static bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
   return (parent_or_count & 1) == 1;
 }
@@ -308,14 +304,10 @@ static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t offset,
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   upb_MemBlock* block = ptr;
 
-  block->size_or_hint = block_size;
+  block->size = block_size;
   UPB_ASSERT(offset >= kUpb_MemblockReserve);
   char* start = UPB_PTR_AT(block, offset, char);
   upb_MemBlock* head = ai->blocks;
-  if (head && head->next) {
-    // Fix up size to match actual allocation size
-    head->size_or_hint = a->UPB_PRIVATE(end) - (char*)head;
-  }
   block->next = head;
   ai->blocks = block;
   UPB_PRIVATE(upb_Xsan_Init)(UPB_XSAN(a));
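With `_upb_Arena_AddBlock` storing the final allocation size up front, the old head-block fix-up (recomputing the head's size from the arena's `end` pointer) disappears, and the list carries one simple invariant: every node's `size` is exact from the moment it is linked in. A hedged debug-check sketch of that invariant, reusing the `upb_MemBlock` definition from the first hunk (`check_blocks` is a hypothetical helper, not part of upb):

```c
#include <assert.h>
#include <stddef.h>

// Hypothetical invariant check: after this change it holds for every
// block at all times, not just for non-head blocks.
static void check_blocks(const upb_MemBlock* head, size_t reserve) {
  for (const upb_MemBlock* b = head; b != NULL; b = b->next) {
    assert(b->size >= reserve);  // each block at least fits its header
  }
}
```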
@@ -334,7 +326,7 @@ void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
   size_t current_free = 0;
   upb_MemBlock* last_block = ai->blocks;
   if (last_block) {
-    last_size = a->UPB_PRIVATE(end) - (char*)last_block;
+    last_size = last_block->size;
     current_free = a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr);
   }
 
@@ -351,7 +343,7 @@ void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
   // allocations, allocate blocks that would net reduce free space behind it.
   if (last_block && current_free > future_free &&
       target_size < max_block_size) {
-    last_size = last_block->size_or_hint;
+    last_size = ai->size_hint;
     // Recalculate sizes with possibly larger last_size
     target_size = UPB_MIN(last_size * 2, max_block_size);
     future_free = UPB_MAX(size, target_size - kUpb_MemblockReserve) - size;
@@ -370,8 +362,7 @@ void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
     // will reach the max block size. Allocations larger than the max block size
     // will always get their own backing allocation, so don't include them.
     if (target_size <= max_block_size) {
-      last_block->size_or_hint =
-          UPB_MIN(last_block->size_or_hint + (size >> 1), max_block_size >> 1);
+      ai->size_hint = UPB_MIN(ai->size_hint + (size >> 1), max_block_size >> 1);
     }
   }
   // We may need to exceed the max block size if the user requested a large
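The sizing policy is easiest to follow with concrete numbers. The standalone trace below (plain C with a local `MIN` macro in place of `UPB_MIN`; the 32 KiB cap and the request sequence are made-up inputs, and the control flow is simplified relative to `_upb_Arena_SlowMalloc`) applies the two formulas above: the next block targets `MIN(hint * 2, max)`, while the hint itself grows by half of each request, capped at `max / 2`:

```c
#include <stdio.h>
#include <stddef.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
  const size_t max_block_size = 32 * 1024;  // assumed cap, for illustration
  size_t size_hint = 128;                   // seed hint from upb_Arena_Init
  const size_t requests[] = {64, 512, 2048, 8192};

  for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
    size_t target_size = MIN(size_hint * 2, max_block_size);
    size_hint = MIN(size_hint + (requests[i] >> 1), max_block_size >> 1);
    printf("request %5zu -> block target %5zu, hint now %5zu\n",
           requests[i], target_size, size_hint);
  }
  return 0;
}
```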
@@ -397,7 +388,7 @@ void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
       memory_order_relaxed);
   if (UPB_UNLIKELY(insert_after_head)) {
     upb_ArenaInternal* ai = upb_Arena_Internal(a);
-    block->size_or_hint = actual_block_size;
+    block->size = actual_block_size;
     upb_MemBlock* head = ai->blocks;
     block->next = head->next;
     head->next = block;
@@ -407,6 +398,7 @@ void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
                         UPB_PRIVATE(kUpb_Asan_GuardSize));
     return allocated;
   } else {
+    ai->size_hint = actual_block_size;
     _upb_Arena_AddBlock(a, block, kUpb_MemblockReserve, actual_block_size);
     UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >= size);
     return upb_Arena_Malloc(a, size - UPB_PRIVATE(kUpb_Asan_GuardSize));
@@ -432,8 +424,8 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc, size_t first_size) {
   size_t actual_block_size = alloc_result.n;
 
   a = UPB_PTR_AT(mem, kUpb_MemblockReserve, upb_ArenaState);
-
   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
+  a->body.size_hint = actual_block_size;
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.previous_or_tail,
@@ -475,6 +467,7 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
                   _upb_Arena_TaggedFromTail(&a->body));
   upb_Atomic_Init(&a->body.space_allocated, 0);
   a->body.blocks = NULL;
+  a->body.size_hint = 128;
   a->body.upb_alloc_cleanup = NULL;
   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
   a->head.UPB_PRIVATE(ptr) = (void*)UPB_ALIGN_MALLOC((uintptr_t)(a + 1));
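An arena created over caller-provided memory has no heap block to derive a hint from, so `upb_Arena_Init` seeds `size_hint` with a fixed 128 bytes (`_upb_Arena_InitSlow` above instead seeds it with the first block's actual size). A usage sketch, assuming upb's public arena API from `upb/mem/arena.h`:

```c
#include "upb/mem/alloc.h"
#include "upb/mem/arena.h"

void example(void) {
  char buf[512];
  // Arena backed by a stack buffer; no heap block exists yet, so the
  // 128-byte seed hint governs the sizing of the first heap block.
  upb_Arena* arena = upb_Arena_Init(buf, sizeof(buf), &upb_alloc_global);
  void* p = upb_Arena_Malloc(arena, 1024);  // outgrows buf: slow path
  (void)p;
  upb_Arena_Free(arena);
}
```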
@@ -502,15 +495,11 @@ static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
   }
   upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
   upb_MemBlock* block = ai->blocks;
-  if (block && block->next) {
-    block->size_or_hint =
-        upb_Arena_FromInternal(ai)->UPB_PRIVATE(end) - (char*)block;
-  }
   upb_AllocCleanupFunc* alloc_cleanup = *ai->upb_alloc_cleanup;
   while (block != NULL) {
     // Load first since we are deleting block.
     upb_MemBlock* next_block = block->next;
-    upb_free_sized(block_alloc, block, block->size_or_hint);
+    upb_free_sized(block_alloc, block, block->size);
     block = next_block;
   }
   if (alloc_cleanup != NULL) {
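This deletion is the payoff of the refactor: `block->size` is always the true allocation size, so no head-block recomputation is needed before freeing, and `upb_Arena_FromInternal` loses its last caller (hence its removal above). Exactness matters here because sized deallocation lets size-aware allocators (jemalloc's `sdallocx`, C++ sized `operator delete`) skip an internal size lookup, and handing such an allocator a stale hint instead of the real size would be undefined behavior. A minimal shim for a plain malloc-backed allocator might look like this (hypothetical, not upb's `upb_free_sized`):

```c
#include <stdlib.h>

// Hypothetical sized-free shim: a malloc/free backend simply drops the
// size, but a size-aware backend would require it to be exact.
static void free_sized_shim(void* ptr, size_t size) {
  (void)size;  // free() ignores it; sdallocx-style allocators use it
  free(ptr);
}
```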
@@ -847,6 +836,7 @@ void UPB_PRIVATE(_upb_Arena_SwapIn)(upb_Arena* des, const upb_Arena* src) {
   *des = *src;
   desi->block_alloc = srci->block_alloc;
   desi->blocks = srci->blocks;
+  desi->size_hint = srci->size_hint;
 }
 
 void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
@@ -855,6 +845,7 @@ void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
 
   *des = *src;
   desi->blocks = srci->blocks;
+  desi->size_hint = srci->size_hint;
 }
 
 bool _upb_Arena_WasLastAlloc(struct upb_Arena* a, void* ptr, size_t oldsize) {
@@ -866,5 +857,5 @@ bool _upb_Arena_WasLastAlloc(struct upb_Arena* a, void* ptr, size_t oldsize) {
   char* start = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
   return UPB_PRIVATE(upb_Xsan_PtrEq)(ptr, start) &&
          UPB_PRIVATE(_upb_Arena_AllocSpan)(oldsize) ==
-             block->size_or_hint - kUpb_MemblockReserve;
+             block->size - kUpb_MemblockReserve;
 }
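`_upb_Arena_WasLastAlloc` recognizes a block whose payload was consumed by a single allocation, which is what makes operations on the most recent allocation (such as in-place realloc) safe. A worked check with made-up numbers (a hypothetical 16-byte `kUpb_MemblockReserve`):

```c
#include <stdbool.h>
#include <stddef.h>

// Made-up values: a 1040-byte block with a 16-byte reserved header holds
// exactly one 1024-byte allocation span, so the check succeeds.
static bool was_last_alloc_example(void) {
  size_t block_size = 1040;  // stands in for block->size
  size_t reserve = 16;       // stands in for kUpb_MemblockReserve
  size_t span = 1024;        // stands in for _upb_Arena_AllocSpan(oldsize)
  return span == block_size - reserve;  // true
}
```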