@@ -34,7 +34,7 @@ namespace scudo {
 // predictable address pattern (the predictability increases with the block
 // size).
 //
-// Regions for size class 0 are special and used to hold TransferBatches, which
+// Regions for size class 0 are special and used to hold Batches, which
 // allow to transfer arrays of pointers from the global size class freelist to
 // the thread specific freelist for said class, and back.
 //
@@ -56,12 +56,21 @@ template <typename Config> class SizeClassAllocator32 {
       typename Conditional<Config::getEnableBlockCache(),
                            SizeClassAllocatorLocalCache<ThisT>,
                            SizeClassAllocatorNoCache<ThisT>>::type;
-  typedef TransferBatch<ThisT> TransferBatchT;
+  typedef Batch<ThisT> BatchT;
   typedef BatchGroup<ThisT> BatchGroupT;
+  static const u16 MaxNumBlocksInBatch = SizeClassMap::MaxNumCachedHint;
+
+  static constexpr uptr getSizeOfBatchClass() {
+    const uptr HeaderSize = sizeof(BatchT);
+    return HeaderSize + sizeof(CompactPtrT) * MaxNumBlocksInBatch;
+  }
+
+  static_assert(sizeof(BatchGroupT) <= getSizeOfBatchClass(),
+                "BatchGroupT also uses BatchClass");
 
   static uptr getSizeByClassId(uptr ClassId) {
     return (ClassId == SizeClassMap::BatchClassId)
-               ? Max(sizeof(BatchGroupT), sizeof(TransferBatchT))
+               ? getSizeOfBatchClass()
                : SizeClassMap::getSizeByClassId(ClassId);
   }
 
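To make the new size computation concrete, here is a minimal standalone sketch of the layout `getSizeOfBatchClass()` accounts for: one `BatchT` header followed by room for `MaxNumBlocksInBatch` compact pointers, with the `static_assert` guaranteeing a `BatchGroupT` also fits in such a block. The struct fields, the 32-bit `CompactPtrT`, and the value 14 are assumptions for illustration, not scudo's actual definitions.

#include <cstdint>

using u16 = uint16_t;
using uptr = uintptr_t;
using CompactPtrT = uint32_t; // assumption: 32-bit compact pointers

// Hypothetical stand-ins for scudo's BatchT/BatchGroupT headers.
struct BatchT {
  BatchT *Next;
  u16 Count;
};
struct BatchGroupT {
  BatchGroupT *Next;
  uptr CompactPtrGroupBase;
  uptr BytesInBGAtLastCheckpoint;
  u16 MaxCachedPerBatch;
};

constexpr u16 MaxNumBlocksInBatch = 14; // assumption: MaxNumCachedHint

// Mirrors the patch: one batch header plus the trailing pointer array.
constexpr uptr getSizeOfBatchClass() {
  return sizeof(BatchT) + sizeof(CompactPtrT) * MaxNumBlocksInBatch;
}

// Both BatchT and BatchGroupT live in BatchClass blocks, so a block must
// be big enough for either; the patch asserts the group header fits too.
static_assert(sizeof(BatchGroupT) <= getSizeOfBatchClass(),
              "BatchGroupT also uses BatchClass");

Compared with the old `Max(sizeof(BatchGroupT), sizeof(TransferBatchT))`, the BatchClass block is now sized for a full batch payload, and the `static_assert` replaces the `Max()`.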
@@ -124,7 +133,7 @@ template <typename Config> class SizeClassAllocator32 {
 
   // When all blocks are freed, it has to be the same size as `AllocatedUser`.
   void verifyAllBlocksAreReleasedTestOnly() {
-    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+    // `BatchGroup` and `Batch` also use the blocks from BatchClass.
     uptr BatchClassUsedInFreeLists = 0;
     for (uptr I = 0; I < NumClasses; I++) {
       // We have to count BatchClassUsedInFreeLists in other regions first.
@@ -134,7 +143,7 @@ template <typename Config> class SizeClassAllocator32 {
       ScopedLock L1(Sci->Mutex);
       uptr TotalBlocks = 0;
       for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
-        // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+        // `BG::Batches` are `Batches`. +1 for `BatchGroup`.
         BatchClassUsedInFreeLists += BG.Batches.size() + 1;
         for (const auto &It : BG.Batches)
           TotalBlocks += It.getCount();
@@ -153,7 +162,7 @@ template <typename Config> class SizeClassAllocator32 {
         for (const auto &It : BG.Batches)
           TotalBlocks += It.getCount();
       } else {
-        // `BatchGroup` with empty freelist doesn't have `TransferBatch` record
+        // `BatchGroup` with empty freelist doesn't have `Batch` record
         // itself.
         ++TotalBlocks;
       }
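As a worked example of the two counting rules in this helper (plain C++ with hypothetical counts): in an ordinary size class, a `BatchGroup` holding three batches pins 3 + 1 BatchClass blocks, while inside the BatchClass region itself a group with an empty freelist still accounts for the one block holding the `BatchGroup`.

#include <cassert>

int main() {
  // Rule from the loop over ordinary size classes: each BatchGroup in a
  // freelist uses one BatchClass block per Batch, plus one for itself.
  unsigned BatchClassUsedInFreeLists = 0;
  const unsigned BatchesInGroup = 3; // hypothetical
  BatchClassUsedInFreeLists += BatchesInGroup + 1;
  assert(BatchClassUsedInFreeLists == 4);

  // Rule from the BatchClass region itself: a BatchGroup with an empty
  // freelist has no Batch recording its block, so it is counted directly.
  unsigned TotalBlocks = 0;
  const bool FreeListEmpty = true; // hypothetical
  if (FreeListEmpty)
    ++TotalBlocks;
  assert(TotalBlocks == 1);
  return 0;
}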
@@ -487,39 +496,39 @@ template <typename Config> class SizeClassAllocator32 {
       REQUIRES(Sci->Mutex) {
     DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));
 
-    // Free blocks are recorded by TransferBatch in freelist for all
-    // size-classes. In addition, TransferBatch is allocated from BatchClassId.
+    // Free blocks are recorded by Batch in freelist for all
+    // size-classes. In addition, Batch is allocated from BatchClassId.
     // In order not to use additional block to record the free blocks in
-    // BatchClassId, they are self-contained. I.e., A TransferBatch records the
+    // BatchClassId, they are self-contained. I.e., A Batch records the
     // block address of itself. See the figure below:
     //
-    // TransferBatch at 0xABCD
+    // Batch at 0xABCD
     // +----------------------------+
     // | Free blocks' addr          |
     // | +------+------+------+     |
     // | |0xABCD|...   |...   |     |
     // | +------+------+------+     |
     // +----------------------------+
     //
-    // When we allocate all the free blocks in the TransferBatch, the block used
-    // by TransferBatch is also free for use. We don't need to recycle the
-    // TransferBatch. Note that the correctness is maintained by the invariant,
+    // When we allocate all the free blocks in the Batch, the block used
+    // by Batch is also free for use. We don't need to recycle the
+    // Batch. Note that the correctness is maintained by the invariant,
     //
-    //   Each popBlocks() request returns the entire TransferBatch. Returning
-    //   part of the blocks in a TransferBatch is invalid.
+    //   Each popBlocks() request returns the entire Batch. Returning
+    //   part of the blocks in a Batch is invalid.
     //
-    // This ensures that TransferBatch won't leak the address itself while it's
+    // This ensures that Batch won't leak the address itself while it's
     // still holding other valid data.
     //
     // Besides, BatchGroup is also allocated from BatchClassId and has its
-    // address recorded in the TransferBatch too. To maintain the correctness,
+    // address recorded in the Batch too. To maintain the correctness,
     //
-    //   The address of BatchGroup is always recorded in the last TransferBatch
+    //   The address of BatchGroup is always recorded in the last Batch
     //   in the freelist (also imply that the freelist should only be
-    //   updated with push_front). Once the last TransferBatch is popped,
+    //   updated with push_front). Once the last Batch is popped,
     //   the block used by BatchGroup is also free for use.
     //
-    // With this approach, the blocks used by BatchGroup and TransferBatch are
+    // With this approach, the blocks used by BatchGroup and Batch are
     // reusable and don't need additional space for them.
 
     Sci->FreeListInfo.PushedBlocks += Size;
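The self-contained scheme described above can be demonstrated in isolation. Below is a toy sketch, using a simplified batch type with a hypothetical capacity rather than scudo's real `BatchT`, where the block holding the batch is itself the first recorded free block, so draining the batch implicitly frees the batch's own storage:

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

using uptr = uintptr_t;

// Simplified stand-in for a self-contained Batch: it stores raw block
// addresses, and one of them is the address of the Batch itself.
struct ToyBatch {
  static constexpr unsigned Capacity = 4; // hypothetical capacity
  unsigned Count = 0;
  uptr Blocks[Capacity];
  void add(uptr P) { Blocks[Count++] = P; }
  uptr pop() { return Blocks[--Count]; }
  bool empty() const { return Count == 0; }
};

int main() {
  // The batch is constructed inside a block taken from the same size class.
  void *Block = std::malloc(sizeof(ToyBatch));
  ToyBatch *TB = new (Block) ToyBatch;

  // Self-contained: the batch records its own block address first...
  TB->add(reinterpret_cast<uptr>(TB));
  // ...then other free blocks (here just one made-up address).
  void *Other = std::malloc(64);
  TB->add(reinterpret_cast<uptr>(Other));

  // popBlocks() takes the entire batch: once every entry is handed out,
  // the batch's own block is among them, so nothing needs recycling.
  while (!TB->empty())
    (void)TB->pop();
  assert(TB->empty());

  std::free(Other);
  std::free(Block); // in scudo, this block is reused as a user block instead
  return 0;
}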
@@ -548,27 +557,27 @@ template <typename Config> class SizeClassAllocator32 {
     //   1. just allocated a new `BatchGroup`.
     //   2. Only 1 block is pushed when the freelist is empty.
     if (BG->Batches.empty()) {
-      // Construct the `TransferBatch` on the last element.
-      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
+      // Construct the `Batch` on the last element.
+      BatchT *TB = reinterpret_cast<BatchT *>(
           decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
       TB->clear();
-      // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
-      // recorded in the TransferBatch.
+      // As mentioned above, addresses of `Batch` and `BatchGroup` are
+      // recorded in the Batch.
       TB->add(Array[Size - 1]);
       TB->add(
           compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
       --Size;
       BG->Batches.push_front(TB);
     }
 
-    TransferBatchT *CurBatch = BG->Batches.front();
+    BatchT *CurBatch = BG->Batches.front();
     DCHECK_NE(CurBatch, nullptr);
 
     for (u32 I = 0; I < Size;) {
       u16 UnusedSlots =
           static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
       if (UnusedSlots == 0) {
-        CurBatch = reinterpret_cast<TransferBatchT *>(
+        CurBatch = reinterpret_cast<BatchT *>(
             decompactPtr(SizeClassMap::BatchClassId, Array[I]));
         CurBatch->clear();
         // Self-contained
@@ -596,7 +605,7 @@ template <typename Config> class SizeClassAllocator32 {
   //                            TB
   //
   // Each BlockGroup(BG) will associate with unique group id and the free blocks
-  // are managed by a list of TransferBatch(TB). To reduce the time of inserting
+  // are managed by a list of Batch(TB). To reduce the time of inserting
   // blocks, BGs are sorted and the input `Array` are supposed to be sorted so
   // that we can get better performance of maintaining sorted property.
   // Use `SameGroup=true` to indicate that all blocks in the array are from the
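For readers less familiar with the freelist shape, here is a structural sketch of the diagram above using standard containers in place of scudo's intrusive lists (all type names are illustrative): a list of groups kept sorted by group base, each owning a front-pushed list of batches.

#include <cassert>
#include <cstdint>
#include <forward_list>
#include <utility>
#include <vector>

using uptr = uintptr_t;

// Toy model of the freelist shape: BlockList -> BG -> BG -> ..., where each
// BatchGroup owns a list of batches holding compact pointers.
struct ToyBatch {
  std::vector<uptr> Blocks;
};

struct ToyBatchGroup {
  uptr CompactPtrGroupBase;            // groups are kept sorted by this key
  std::forward_list<ToyBatch> Batches; // new batches are pushed at the front
};

// Mirrors FreeListInfo.BlockList: kept sorted so that a sorted input array
// can be merged in a single forward pass.
using ToyBlockList = std::forward_list<ToyBatchGroup>;

static void insertSorted(ToyBlockList &L, ToyBatchGroup G) {
  auto Prev = L.before_begin();
  for (auto It = L.begin();
       It != L.end() && It->CompactPtrGroupBase < G.CompactPtrGroupBase; ++It)
    ++Prev;
  L.insert_after(Prev, std::move(G));
}

int main() {
  ToyBlockList BlockList;
  insertSorted(BlockList, {0x2000, {}});
  insertSorted(BlockList, {0x1000, {}});
  // The list stays sorted regardless of insertion order.
  assert(BlockList.front().CompactPtrGroupBase == 0x1000);
  return 0;
}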
@@ -613,29 +622,29 @@ template <typename Config> class SizeClassAllocator32 {
       BatchGroupT *BG = reinterpret_cast<BatchGroupT *>(
           SizeClassAllocator->getBatchClassBlock());
       BG->Batches.clear();
-      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
-          SizeClassAllocator->getBatchClassBlock());
+      BatchT *TB =
+          reinterpret_cast<BatchT *>(SizeClassAllocator->getBatchClassBlock());
       TB->clear();
 
       BG->CompactPtrGroupBase = CompactPtrGroupBase;
       BG->Batches.push_front(TB);
       BG->BytesInBGAtLastCheckpoint = 0;
-      BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
+      BG->MaxCachedPerBatch = MaxNumBlocksInBatch;
 
       return BG;
     };
 
     auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
-      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
-      TransferBatchT *CurBatch = Batches.front();
+      SinglyLinkedList<BatchT> &Batches = BG->Batches;
+      BatchT *CurBatch = Batches.front();
       DCHECK_NE(CurBatch, nullptr);
 
       for (u32 I = 0; I < Size;) {
         DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
         u16 UnusedSlots =
             static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
         if (UnusedSlots == 0) {
-          CurBatch = reinterpret_cast<TransferBatchT *>(
+          CurBatch = reinterpret_cast<BatchT *>(
               SizeClassAllocator->getBatchClassBlock());
           CurBatch->clear();
           Batches.push_front(CurBatch);
@@ -716,7 +725,7 @@ template <typename Config> class SizeClassAllocator32 {
     if (Sci->FreeListInfo.BlockList.empty())
       return 0U;
 
-    SinglyLinkedList<TransferBatchT> &Batches =
+    SinglyLinkedList<BatchT> &Batches =
         Sci->FreeListInfo.BlockList.front()->Batches;
 
     if (Batches.empty()) {
@@ -725,27 +734,27 @@ template <typename Config> class SizeClassAllocator32 {
       Sci->FreeListInfo.BlockList.pop_front();
 
       // Block used by `BatchGroup` is from BatchClassId. Turn the block into
-      // `TransferBatch` with single block.
-      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
+      // `Batch` with single block.
+      BatchT *TB = reinterpret_cast<BatchT *>(BG);
       ToArray[0] =
           compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
       Sci->FreeListInfo.PoppedBlocks += 1;
       return 1U;
     }
 
     // So far, instead of always filling the blocks to `MaxBlockCount`, we only
-    // examine single `TransferBatch` to minimize the time spent on the primary
-    // allocator. Besides, the sizes of `TransferBatch` and
+    // examine single `Batch` to minimize the time spent on the primary
+    // allocator. Besides, the sizes of `Batch` and
     // `SizeClassAllocatorT::getMaxCached()` may also impact the time spent on
     // accessing the primary allocator.
     // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
-    // blocks and/or adjust the size of `TransferBatch` according to
+    // blocks and/or adjust the size of `Batch` according to
     // `SizeClassAllocatorT::getMaxCached()`.
-    TransferBatchT *B = Batches.front();
+    BatchT *B = Batches.front();
     DCHECK_NE(B, nullptr);
     DCHECK_GT(B->getCount(), 0U);
 
-    // BachClassId should always take all blocks in the TransferBatch. Read the
+    // BachClassId should always take all blocks in the Batch. Read the
     // comment in `pushBatchClassBlocks()` for more details.
     const u16 PopCount = ClassId == SizeClassMap::BatchClassId
                              ? B->getCount()
@@ -756,7 +765,7 @@ template <typename Config> class SizeClassAllocator32 {
     // done without holding `Mutex`.
     if (B->empty()) {
       Batches.pop_front();
-      // `TransferBatch` of BatchClassId is self-contained, no need to
+      // `Batch` of BatchClassId is self-contained, no need to
       // deallocate. Read the comment in `pushBatchClassBlocks()` for more
       // details.
       if (ClassId != SizeClassMap::BatchClassId)
@@ -769,7 +778,7 @@ template <typename Config> class SizeClassAllocator32 {
       // We don't keep BatchGroup with zero blocks to avoid empty-checking
       // while allocating. Note that block used for constructing BatchGroup is
       // recorded as free blocks in the last element of BatchGroup::Batches.
-      // Which means, once we pop the last TransferBatch, the block is
+      // Which means, once we pop the last Batch, the block is
       // implicitly deallocated.
       if (ClassId != SizeClassMap::BatchClassId)
         SizeClassAllocator->deallocate(SizeClassMap::BatchClassId, BG);
@@ -816,8 +825,7 @@ template <typename Config> class SizeClassAllocator32 {
                          static_cast<u32>((RegionSize - Offset) / Size));
     DCHECK_GT(NumberOfBlocks, 0U);
 
-    constexpr u32 ShuffleArraySize =
-        MaxNumBatches * TransferBatchT::MaxNumCached;
+    constexpr u32 ShuffleArraySize = MaxNumBatches * MaxNumBlocksInBatch;
     // Fill the transfer batches and put them in the size-class freelist. We
     // need to randomize the blocks for security purposes, so we first fill a
     // local array that we then shuffle before populating the batches.
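The fill-then-shuffle step reads roughly like the sketch below, with `std::shuffle` standing in for scudo's internal shuffling with a per-region random state, and all sizes being made-up numbers: collect the new blocks into a local array, shuffle it, then carve it into batches of at most `MaxNumBlocksInBatch` entries.

#include <algorithm>
#include <cstdint>
#include <random>
#include <vector>

using u32 = uint32_t;
using CompactPtrT = uint32_t; // assumption: 32-bit compact pointers

int main() {
  const u32 NumberOfBlocks = 100;     // hypothetical
  const u32 MaxNumBlocksInBatch = 14; // assumption: MaxNumCachedHint

  // Fill a local array with the freshly carved blocks (fake addresses here).
  std::vector<CompactPtrT> ShuffleArray(NumberOfBlocks);
  for (u32 I = 0; I < NumberOfBlocks; I++)
    ShuffleArray[I] = I * 32; // pretend the block size is 32 bytes

  // Randomize for security, as the comment above explains; scudo uses its
  // own shuffle() with a per-region RandState instead of <random>.
  std::mt19937 Rng(0xdeadbeef); // hypothetical seed
  std::shuffle(ShuffleArray.begin(), ShuffleArray.end(), Rng);

  // Carve the shuffled array into batches of at most MaxNumBlocksInBatch.
  std::vector<std::vector<CompactPtrT>> Batches;
  for (u32 I = 0; I < NumberOfBlocks; I += MaxNumBlocksInBatch) {
    const u32 End = std::min(I + MaxNumBlocksInBatch, NumberOfBlocks);
    Batches.emplace_back(ShuffleArray.begin() + I, ShuffleArray.begin() + End);
  }
  return 0;
}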
@@ -1102,7 +1110,7 @@ template <typename Config> class SizeClassAllocator32 {
       if (AllocatedGroupSize == 0)
         continue;
 
-      // TransferBatches are pushed in front of BG.Batches. The first one may
+      // Batches are pushed in front of BG.Batches. The first one may
       // not have all caches used.
       const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                              BG.Batches.front()->getCount();
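Because batches are pushed at the front, every batch except the front one is full, which is exactly what the `NumBlocks` expression encodes. A quick check with hypothetical numbers:

#include <cassert>

int main() {
  // Hypothetical: a group with 3 batches, capacity 14 per batch, and only
  // 5 entries used in the most recently pushed (front) batch.
  const unsigned NumBatches = 3;
  const unsigned MaxCachedPerBatch = 14;
  const unsigned FrontCount = 5;

  // All non-front batches are full: (3 - 1) * 14 + 5 == 33 blocks in total.
  const unsigned NumBlocks = (NumBatches - 1) * MaxCachedPerBatch + FrontCount;
  assert(NumBlocks == 33);
  return 0;
}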