[scudo] Move out the definitions of member functions in primary allocators #147601
base: main
Conversation
…ators This greatly improves readability so that the design can be understood from the concise class definition.
@llvm/pr-subscribers-compiler-rt-sanitizer

Author: None (ChiaHungDuan)

Changes

This greatly improves readability so that the design can be understood from the concise class definition.

Patch is 224.27 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/147601.diff

2 Files Affected:
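For context, the change follows the usual C++ pattern of keeping only declarations inside the class template and defining the members out-of-line later in the same header. A minimal sketch of that pattern, with hypothetical names rather than code from the patch:

```cpp
#include <cstdint>

// Sketch only: the class body shows just declarations and data members, and
// the definitions follow out-of-line, still in the header because the class
// is a template. Names and members here are illustrative, not Scudo's.
template <typename Config> class PrimaryAllocatorSketch {
public:
  void init(int32_t ReleaseToOsIntervalMs); // declaration only
  uint16_t popBlocks(uintptr_t ClassId);    // declaration only

private:
  int32_t IntervalMs = 0; // data members keep the class layout visible
};

template <typename Config>
void PrimaryAllocatorSketch<Config>::init(int32_t ReleaseToOsIntervalMs) {
  IntervalMs = ReleaseToOsIntervalMs; // body moved here unchanged
}

template <typename Config>
uint16_t PrimaryAllocatorSketch<Config>::popBlocks(uintptr_t ClassId) {
  (void)ClassId; // body moved here unchanged
  return 0;
}
```

Because the definitions still live in the same header, the change is purely organizational (NFC, as noted below): callers and instantiations are unaffected.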
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 756adcadd0c7b..176793692b760 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -76,315 +76,64 @@ template <typename Config> class SizeClassAllocator32 {
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
- if (SCUDO_FUCHSIA)
- reportError("SizeClassAllocator32 is not supported on Fuchsia");
-
- if (SCUDO_TRUSTY)
- reportError("SizeClassAllocator32 is not supported on Trusty");
-
- DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
- PossibleRegions.init();
- u32 Seed;
- const u64 Time = getMonotonicTimeFast();
- if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
- Seed = static_cast<u32>(
- Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
- for (uptr I = 0; I < NumClasses; I++) {
- SizeClassInfo *Sci = getSizeClassInfo(I);
- Sci->RandState = getRandomU32(&Seed);
- // Sci->MaxRegionIndex is already initialized to 0.
- Sci->MinRegionIndex = NumRegions;
- Sci->ReleaseInfo.LastReleaseAtNs = Time;
- }
-
- // The default value in the primary config has the higher priority.
- if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
- ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
- setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
- }
-
- void unmapTestOnly() {
- {
- ScopedLock L(RegionsStashMutex);
- while (NumberOfStashedRegions > 0) {
- unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
- RegionSize);
- }
- }
-
- uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
- for (uptr I = 0; I < NumClasses; I++) {
- SizeClassInfo *Sci = getSizeClassInfo(I);
- ScopedLock L(Sci->Mutex);
- if (Sci->MinRegionIndex < MinRegionIndex)
- MinRegionIndex = Sci->MinRegionIndex;
- if (Sci->MaxRegionIndex > MaxRegionIndex)
- MaxRegionIndex = Sci->MaxRegionIndex;
- *Sci = {};
- }
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS;
- ScopedLock L(ByteMapMutex);
- for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
- if (PossibleRegions[I])
- unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
- PossibleRegions.unmapTestOnly();
- }
+ void unmapTestOnly();
// When all blocks are freed, it has to be the same size as `AllocatedUser`.
- void verifyAllBlocksAreReleasedTestOnly() {
- // `BatchGroup` and `Batch` also use the blocks from BatchClass.
- uptr BatchClassUsedInFreeLists = 0;
- for (uptr I = 0; I < NumClasses; I++) {
- // We have to count BatchClassUsedInFreeLists in other regions first.
- if (I == SizeClassMap::BatchClassId)
- continue;
- SizeClassInfo *Sci = getSizeClassInfo(I);
- ScopedLock L1(Sci->Mutex);
- uptr TotalBlocks = 0;
- for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
- // `BG::Batches` are `Batches`. +1 for `BatchGroup`.
- BatchClassUsedInFreeLists += BG.Batches.size() + 1;
- for (const auto &It : BG.Batches)
- TotalBlocks += It.getCount();
- }
-
- const uptr BlockSize = getSizeByClassId(I);
- DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
- DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
- }
-
- SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
- ScopedLock L1(Sci->Mutex);
- uptr TotalBlocks = 0;
- for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
- if (LIKELY(!BG.Batches.empty())) {
- for (const auto &It : BG.Batches)
- TotalBlocks += It.getCount();
- } else {
- // `BatchGroup` with empty freelist doesn't have `Batch` record
- // itself.
- ++TotalBlocks;
- }
- }
-
- const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
- DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
- Sci->AllocatedUser / BlockSize);
- const uptr BlocksInUse =
- Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
- DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
- }
+ void verifyAllBlocksAreReleasedTestOnly();
CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
return static_cast<CompactPtrT>(Ptr);
}
-
void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
}
-
uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
return CompactPtr & ~Mask;
}
-
uptr decompactGroupBase(uptr CompactPtrGroupBase) {
return CompactPtrGroupBase;
}
-
- ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+ ALWAYS_INLINE bool isSmallBlock(uptr BlockSize) {
const uptr PageSize = getPageSizeCached();
return BlockSize < PageSize / 16U;
}
-
- ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+ ALWAYS_INLINE bool isLargeBlock(uptr BlockSize) {
const uptr PageSize = getPageSizeCached();
return BlockSize > PageSize;
}
u16 popBlocks(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId,
- CompactPtrT *ToArray, const u16 MaxBlockCount) {
- DCHECK_LT(ClassId, NumClasses);
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
- ScopedLock L(Sci->Mutex);
-
- u16 PopCount =
- popBlocksImpl(SizeClassAllocator, ClassId, Sci, ToArray, MaxBlockCount);
- if (UNLIKELY(PopCount == 0)) {
- if (UNLIKELY(!populateFreeList(SizeClassAllocator, ClassId, Sci)))
- return 0U;
- PopCount = popBlocksImpl(SizeClassAllocator, ClassId, Sci, ToArray,
- MaxBlockCount);
- DCHECK_NE(PopCount, 0U);
- }
-
- return PopCount;
- }
+ CompactPtrT *ToArray, const u16 MaxBlockCount);
// Push the array of free blocks to the designated batch group.
void pushBlocks(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId,
- CompactPtrT *Array, u32 Size) {
- DCHECK_LT(ClassId, NumClasses);
- DCHECK_GT(Size, 0);
-
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
- if (ClassId == SizeClassMap::BatchClassId) {
- ScopedLock L(Sci->Mutex);
- pushBatchClassBlocks(Sci, Array, Size);
- return;
- }
-
- // TODO(chiahungduan): Consider not doing grouping if the group size is not
- // greater than the block size with a certain scale.
-
- // Sort the blocks so that blocks belonging to the same group can be pushed
- // together.
- bool SameGroup = true;
- for (u32 I = 1; I < Size; ++I) {
- if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
- SameGroup = false;
- CompactPtrT Cur = Array[I];
- u32 J = I;
- while (J > 0 &&
- compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
- Array[J] = Array[J - 1];
- --J;
- }
- Array[J] = Cur;
- }
-
- ScopedLock L(Sci->Mutex);
- pushBlocksImpl(SizeClassAllocator, ClassId, Sci, Array, Size, SameGroup);
- }
+ CompactPtrT *Array, u32 Size);
- void disable() NO_THREAD_SAFETY_ANALYSIS {
- // The BatchClassId must be locked last since other classes can use it.
- for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
- if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
- continue;
- getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
- }
- getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
- RegionsStashMutex.lock();
- ByteMapMutex.lock();
- }
+ void disable() NO_THREAD_SAFETY_ANALYSIS;
+ void enable() NO_THREAD_SAFETY_ANALYSIS;
- void enable() NO_THREAD_SAFETY_ANALYSIS {
- ByteMapMutex.unlock();
- RegionsStashMutex.unlock();
- getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
- for (uptr I = 0; I < NumClasses; I++) {
- if (I == SizeClassMap::BatchClassId)
- continue;
- getSizeClassInfo(I)->Mutex.unlock();
- }
- }
-
- template <typename F> void iterateOverBlocks(F Callback) {
- uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
- for (uptr I = 0; I < NumClasses; I++) {
- SizeClassInfo *Sci = getSizeClassInfo(I);
- // TODO: The call of `iterateOverBlocks` requires disabling
- // SizeClassAllocator32. We may consider locking each region on demand
- // only.
- Sci->Mutex.assertHeld();
- if (Sci->MinRegionIndex < MinRegionIndex)
- MinRegionIndex = Sci->MinRegionIndex;
- if (Sci->MaxRegionIndex > MaxRegionIndex)
- MaxRegionIndex = Sci->MaxRegionIndex;
- }
-
- // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
- ByteMapMutex.assertHeld();
-
- for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
- if (PossibleRegions[I] &&
- (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
- const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
- const uptr From = I * RegionSize;
- const uptr To = From + (RegionSize / BlockSize) * BlockSize;
- for (uptr Block = From; Block < To; Block += BlockSize)
- Callback(Block);
- }
- }
- }
-
- void getStats(ScopedString *Str) {
- // TODO(kostyak): get the RSS per region.
- uptr TotalMapped = 0;
- uptr PoppedBlocks = 0;
- uptr PushedBlocks = 0;
- for (uptr I = 0; I < NumClasses; I++) {
- SizeClassInfo *Sci = getSizeClassInfo(I);
- ScopedLock L(Sci->Mutex);
- TotalMapped += Sci->AllocatedUser;
- PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
- PushedBlocks += Sci->FreeListInfo.PushedBlocks;
- }
- Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
- "remains %zu\n",
- TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
- for (uptr I = 0; I < NumClasses; I++) {
- SizeClassInfo *Sci = getSizeClassInfo(I);
- ScopedLock L(Sci->Mutex);
- getStats(Str, I, Sci);
- }
- }
-
- void getFragmentationInfo(ScopedString *Str) {
- Str->append(
- "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
- getPageSizeCached());
-
- for (uptr I = 1; I < NumClasses; I++) {
- SizeClassInfo *Sci = getSizeClassInfo(I);
- ScopedLock L(Sci->Mutex);
- getSizeClassFragmentationInfo(Sci, I, Str);
- }
- }
+ template <typename F> void iterateOverBlocks(F Callback);
+ void getStats(ScopedString *Str);
+ void getFragmentationInfo(ScopedString *Str);
void getMemoryGroupFragmentationInfo(ScopedString *Str) {
// Each region is also a memory group because region size is the same as
// group size.
getFragmentationInfo(Str);
}
- bool setOption(Option O, sptr Value) {
- if (O == Option::ReleaseInterval) {
- const s32 Interval = Max(
- Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
- Config::getMinReleaseToOsIntervalMs());
- atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
- return true;
- }
- // Not supported by the Primary, but not an error either.
- return true;
- }
+ bool setOption(Option O, sptr Value);
- uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
- // TODO: Once we have separate locks like primary64, we may consider using
- // tryLock() as well.
- ScopedLock L(Sci->Mutex);
- return releaseToOSMaybe(Sci, ClassId, ReleaseType);
- }
-
- uptr releaseToOS(ReleaseToOS ReleaseType) {
- uptr TotalReleasedBytes = 0;
- for (uptr I = 0; I < NumClasses; I++) {
- if (I == SizeClassMap::BatchClassId)
- continue;
- SizeClassInfo *Sci = getSizeClassInfo(I);
- ScopedLock L(Sci->Mutex);
- TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
- }
- return TotalReleasedBytes;
- }
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType);
+ uptr releaseToOS(ReleaseToOS ReleaseType);
const char *getRegionInfoArrayAddress() const { return nullptr; }
static uptr getRegionInfoArraySize() { return 0; }
+ // Not supported in SizeClassAllocator32.
static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
UNUSED uptr Ptr) {
return {};
@@ -434,755 +183,1086 @@ template <typename Config> class SizeClassAllocator32 {
return Id;
}
- uptr allocateRegionSlow() {
- uptr MapSize = 2 * RegionSize;
- const uptr MapBase = reinterpret_cast<uptr>(
- map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
- if (!MapBase)
- return 0;
- const uptr MapEnd = MapBase + MapSize;
- uptr Region = MapBase;
- if (isAligned(Region, RegionSize)) {
- ScopedLock L(RegionsStashMutex);
- if (NumberOfStashedRegions < MaxStashedRegions)
- RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
- else
- MapSize = RegionSize;
- } else {
- Region = roundUp(MapBase, RegionSize);
- unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
- MapSize = RegionSize;
- }
- const uptr End = Region + MapSize;
- if (End != MapEnd)
- unmap(reinterpret_cast<void *>(End), MapEnd - End);
+ uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex);
+ uptr allocateRegionSlow();
+
+ SizeClassInfo *getSizeClassInfo(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ return &SizeClassInfoArray[ClassId];
+ }
- DCHECK_EQ(Region % RegionSize, 0U);
- static_assert(Config::getRegionSizeLog() == GroupSizeLog,
- "Memory group should be the same size as Region");
+ void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
+ REQUIRES(Sci->Mutex);
- return Region;
+ // Push the blocks to their batch group. The layout will be like,
+ void pushBlocksImpl(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId,
+ SizeClassInfo *Sci, CompactPtrT *Array, u32 Size,
+ bool SameGroup = false) REQUIRES(Sci->Mutex);
+ u16 popBlocksImpl(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId,
+ SizeClassInfo *Sci, CompactPtrT *ToArray,
+ const u16 MaxBlockCount) REQUIRES(Sci->Mutex);
+ NOINLINE bool populateFreeList(SizeClassAllocatorT *SizeClassAllocator,
+ uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex);
+
+ void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex);
+ void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
+ ScopedString *Str) REQUIRES(Sci->Mutex);
+
+ NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
+ ReleaseToOS ReleaseType = ReleaseToOS::Normal)
+ REQUIRES(Sci->Mutex);
+ bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
+ uptr BytesInFreeList, ReleaseToOS ReleaseType)
+ REQUIRES(Sci->Mutex);
+ PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
+ const uptr BlockSize, const uptr Base,
+ const uptr NumberOfRegions,
+ ReleaseToOS ReleaseType)
+ REQUIRES(Sci->Mutex);
+
+ SizeClassInfo SizeClassInfoArray[NumClasses] = {};
+ HybridMutex ByteMapMutex;
+ // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
+ ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
+ atomic_s32 ReleaseToOsIntervalMs = {};
+ // Unless several threads request regions simultaneously from different size
+ // classes, the stash rarely contains more than 1 entry.
+ static constexpr uptr MaxStashedRegions = 4;
+ HybridMutex RegionsStashMutex;
+ uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
+ uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
+};
+
+template <typename Config>
+void SizeClassAllocator32<Config>::init(s32 ReleaseToOsInterval)
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (SCUDO_FUCHSIA)
+ reportError("SizeClassAllocator32 is not supported on Fuchsia");
+
+ if (SCUDO_TRUSTY)
+ reportError("SizeClassAllocator32 is not supported on Trusty");
+
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
+ PossibleRegions.init();
+ u32 Seed;
+ const u64 Time = getMonotonicTimeFast();
+ if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
+ Seed = static_cast<u32>(Time ^
+ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ Sci->RandState = getRandomU32(&Seed);
+ // Sci->MaxRegionIndex is already initialized to 0.
+ Sci->MinRegionIndex = NumRegions;
+ Sci->ReleaseInfo.LastReleaseAtNs = Time;
}
- uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
- DCHECK_LT(ClassId, NumClasses);
- uptr Region = 0;
- {
- ScopedLock L(RegionsStashMutex);
- if (NumberOfStashedRegions > 0)
- Region = RegionsStash[--NumberOfStashedRegions];
+ // The default value in the primary config has the higher priority.
+ if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
+ ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
+ setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+}
+
+template <typename Config> void SizeClassAllocator32<Config>::unmapTestOnly() {
+ {
+ ScopedLock L(RegionsStashMutex);
+ while (NumberOfStashedRegions > 0) {
+ unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+ RegionSize);
}
- if (!Region)
- Region = allocateRegionSlow();
- if (LIKELY(Region)) {
- // Sci->Mutex is held by the caller, updating the Min/Max is safe.
- const uptr RegionIndex = computeRegionId(Region);
- if (RegionIndex < Sci->MinRegionIndex)
- Sci->MinRegionIndex = RegionIndex;
- if (RegionIndex > Sci->MaxRegionIndex)
- Sci->MaxRegionIndex = RegionIndex;
- ScopedLock L(ByteMapMutex);
- PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
+ }
+
+ uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ if (Sci->MinRegionIndex < MinRegionIndex)
+ MinRegionIndex = Sci->MinRegionIndex;
+ if (Sci->MaxRegionIndex > MaxRegionIndex)
+ MaxRegionIndex = Sci->MaxRegionIndex;
+ *Sci = {};
+ }
+
+ ScopedLock L(ByteMapMutex);
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+ if (PossibleRegions[I])
+ unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
+ PossibleRegions.unmapTestOnly();
+}
+template <typename Config>
+void SizeClassAllocator32<Config>::verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `Batch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
+ // `BG::Batches` are `Batches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
}
- return Region;
+
+ const uptr BlockSize = getSizeByClassId(I);
+ DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
+ DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
}
- SizeClassInfo *getSizeClassInfo(uptr ClassId) {
- DCHECK_LT(ClassId, NumClasses);
- return &SizeClassInfoArray[ClassId];
+ SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &...
[truncated]
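One detail visible in the declarations above is that the Clang thread-safety annotations (REQUIRES(Sci->Mutex), GUARDED_BY(...)) stay attached to the declarations. A minimal sketch of why that is sufficient, using assumed macro definitions rather than Scudo's own headers, and illustrative names:

```cpp
// Assumed stand-ins for the thread-safety macros; the point is that
// -Wthread-safety reads REQUIRES(...) from the declaration, so the contract
// survives moving the body out-of-line.
#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRE() __attribute__((acquire_capability()))
#define RELEASE() __attribute__((release_capability()))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))

struct CAPABILITY("mutex") MutexSketch {
  void lock() ACQUIRE() {}
  void unlock() RELEASE() {}
};

class SizeClassInfoSketch {
public:
  void pushBlocks();

private:
  MutexSketch Mutex;
  int PushedBlocks GUARDED_BY(Mutex) = 0;

  // The annotation lives on the declaration; the out-of-line definition below
  // does not repeat it, yet the analysis still enforces it at call sites.
  void pushBlocksImpl() REQUIRES(Mutex);
};

void SizeClassInfoSketch::pushBlocksImpl() { ++PushedBlocks; }

void SizeClassInfoSketch::pushBlocks() {
  Mutex.lock();
  pushBlocksImpl(); // OK: Mutex is held here
  Mutex.unlock();
}
```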
BTW, this is an NFC and I also added a few comments to the functions.
    if (PossibleRegions[I])
      unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
  PossibleRegions.unmapTestOnly();
}
Add a newline; that seems to be the pattern for these functions.
done
  }
}

// FreeListInfo.BlockList - > BG -> BG -> BG
Did you mean to remove the first part of this comment?
The original comment had this at the front:
// Push the blocks to their batch group. The layout will be like,
//
My fault, I thought the first line was a summary so I extracted it to the declaration. Given that pushBlocks() already has a comment, I've restored the comment here.
// will be pushed next. `Prev` is the element right before `Cur`.
BatchGroupT *Prev = nullptr;

template <typename Config>
void SizeClassAllocator64<Config>::initRegion(RegionInfo *Region, uptr ClassId,
This loses the ALWAYS_INLINE tag. Does this cause any increase in the code size of Scudo?
I kept the one at the function declaration, and it doesn't show up in the symbol table.
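For reference, a minimal sketch of that arrangement (assumed macro definition and illustrative names, not the Scudo sources): the attribute stays on the in-class declaration while the definition lives out-of-line, and on GCC/Clang the attribute from the declaration still applies to the function.

```cpp
#include <cstdint>

// Assumed stand-in for Scudo's ALWAYS_INLINE macro.
#if defined(__GNUC__) || defined(__clang__)
#define ALWAYS_INLINE_SKETCH inline __attribute__((always_inline))
#else
#define ALWAYS_INLINE_SKETCH inline
#endif

template <typename Config> class AllocatorSketch {
public:
  // The attribute is kept on the declaration only.
  ALWAYS_INLINE_SKETCH bool isSmallBlock(uintptr_t BlockSize);
};

// Out-of-line definition without repeating the attribute.
template <typename Config>
bool AllocatorSketch<Config>::isSmallBlock(uintptr_t BlockSize) {
  return BlockSize < uintptr_t(4096) / 16; // illustrative threshold only
}
```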
  Region->MemMapInfo.AllocatedUser += AllocatedUser;

  return PopCount;
}
Add a newline.
  }
}

// FreeListInfo.BlockList - > BG -> BG -> BG
Same as in primary32.h, missing the first two lines:
// Push the blocks to their batch group. The layout will be like,
//
Done
LGTM.