diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim
index e2dd430759cd..3389642342b7 100644
--- a/lib/system/alloc.nim
+++ b/lib/system/alloc.nim
@@ -177,6 +177,26 @@ type
     when defined(nimTypeNames):
       allocCounter, deallocCounter: int
 
+
+proc asanPoisonMemoryRegion(p: pointer, size: int) {.importc: "ASAN_POISON_MEMORY_REGION", header: "<sanitizer/asan_interface.h>".}
+proc asanUnpoisonMemoryRegion(p: pointer, size: int) {.importc: "ASAN_UNPOISON_MEMORY_REGION", header: "<sanitizer/asan_interface.h>".}
+
+template poison(p: pointer, size: int) =
+  #cprintf("-%p %lld\n", p, size)
+  when defined(useAsan):
+    asanPoisonMemoryRegion(p, size)
+
+template unpoison(p: pointer, size: int) =
+  #cprintf("+%p %lld\n", p, size)
+  when defined(useAsan):
+    asanUnpoisonMemoryRegion(p, size)
+
+template poisonGuard(p: pointer, size: int, body: untyped) =
+  unpoison(p, size)
+  body
+  poison(p, size)
+
+
 template smallChunkOverhead(): untyped = sizeof(SmallChunk)
 template bigChunkOverhead(): untyped = sizeof(BigChunk)
 
@@ -319,13 +339,15 @@ proc allocPages(a: var MemRegion, size: int): pointer =
   when nimMaxHeap != 0:
     if a.occ + size > nimMaxHeap * 1024 * 1024:
       raiseOutOfMem()
-  osAllocPages(size)
+  result = osAllocPages(size)
+  poison(result, size)
 
 proc tryAllocPages(a: var MemRegion, size: int): pointer =
   when nimMaxHeap != 0:
     if a.occ + size > nimMaxHeap * 1024 * 1024:
       raiseOutOfMem()
-  osTryAllocPages(size)
+  result = osTryAllocPages(size)
+  poison(result, size)
 
 proc llAlloc(a: var MemRegion, size: int): pointer =
   # *low-level* alloc for the memory managers data structures. Deallocation
@@ -337,6 +359,7 @@ proc llAlloc(a: var MemRegion, size: int): pointer =
     sysAssert roundup(size+sizeof(LLChunk), PageSize) == PageSize, "roundup 6"
     var old = a.llmem # can be nil and is correct with nil
     a.llmem = cast[PLLChunk](allocPages(a, PageSize))
+    unpoison(a.llmem, sizeof(LLChunk))
     when defined(nimAvlcorruption):
       trackLocation(a.llmem, PageSize)
     incCurrMem(a, PageSize)
@@ -346,6 +369,7 @@ proc llAlloc(a: var MemRegion, size: int): pointer =
   result = cast[pointer](cast[int](a.llmem) + a.llmem.acc)
   dec(a.llmem.size, size)
   inc(a.llmem.acc, size)
+  unpoison(result, size)
   zeroMem(result, size)
 
 when not defined(gcDestructors):
@@ -534,6 +558,7 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
       a.blockChunkSizeIncrease = true
     else:
       size = a.nextChunkSize
+  unpoison(result, sizeof(BigChunk))
 
   incCurrMem(a, size)
   inc(a.freeMem, size)
@@ -613,6 +638,7 @@ proc updatePrevSize(a: var MemRegion, c: PBigChunk,
 
 proc splitChunk2(a: var MemRegion, c: PBigChunk, size: int): PBigChunk =
   result = cast[PBigChunk](cast[int](c) +% size)
+  unpoison(result, sizeof(BigChunk))
   result.size = c.size - size
   track("result.size", addr result.size, sizeof(int))
   when not defined(nimOptimizedSplitChunk):
@@ -893,6 +919,7 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
     if c == nil:
       # There is no free chunk of the requested size available, we need a new one.
       c = getSmallChunk(a)
+      unpoison(c, sizeof(SmallChunk))
       # init all fields in case memory didn't get zeroed
       c.freeList = nil
      c.foreignCells = 0
@@ -911,6 +938,7 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
         # we must not add it to the list if it cannot be used the next time a pointer of `size` bytes is needed.
         listAdd(a.freeSmallChunks[s], c)
       result = addr(c.data)
+      unpoison(result, size)
       sysAssert((cast[int](result) and (MemAlign-1)) == 0, "rawAlloc 4")
     else:
       # There is a free chunk of the requested size available, use it.
@@ -923,10 +951,12 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
         sysAssert(c.acc.int + smallChunkOverhead() + size <= SmallChunkSize,
                   "rawAlloc 7")
         result = cast[pointer](cast[int](addr(c.data)) +% c.acc.int)
+        unpoison(result, size)
         inc(c.acc, size)
       else:
         # There are free cells available, prefer them over the accumulator
         result = c.freeList
+        unpoison(result, size)
         when not defined(gcDestructors):
           sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
         c.freeList = c.freeList.next
@@ -974,6 +1004,7 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
     sysAssert c.prev == nil, "rawAlloc 10"
     sysAssert c.next == nil, "rawAlloc 11"
     result = addr(c.data)
+    unpoison(result, size)
     sysAssert((cast[int](c) and (MemAlign-1)) == 0, "rawAlloc 13")
     sysAssert((cast[int](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
     when not defined(gcDestructors):
@@ -1018,6 +1049,7 @@ proc rawDealloc(a: var MemRegion, p: pointer) =
       # set to 0xff to check for usage after free bugs:
       nimSetMem(cast[pointer](cast[int](p) +% sizeof(FreeCell)), -1'i32,
                 s -% sizeof(FreeCell))
+    poison(f, s)
     let activeChunk = a.freeSmallChunks[s div MemAlign]
     if activeChunk != nil and c != activeChunk:
       # This pointer is not part of the active chunk, lend it out
@@ -1025,12 +1057,14 @@ proc rawDealloc(a: var MemRegion, p: pointer) =
       # Put the cell into the active chunk,
       # may prevent a queue of available chunks from forming in a.freeSmallChunks[s div MemAlign].
       # This queue would otherwise waste memory in the form of free cells until we return to those chunks.
-      f.next = activeChunk.freeList
+      poisonGuard(f, sizeof(FreeCell)):
+        f.next = activeChunk.freeList
       activeChunk.freeList = f # lend the cell
       inc(activeChunk.free, s) # By not adjusting the current chunk's capacity it is prevented from being freed
       inc(activeChunk.foreignCells) # The cell is now considered foreign from the perspective of the active chunk
     else:
-      f.next = c.freeList
+      poisonGuard(f, sizeof(FreeCell)):
+        f.next = c.freeList
       c.freeList = f
       if c.free < s:
         # The chunk could not have been active as it didn't have enough space to give
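
Note on the pattern above, for readers unfamiliar with the ASan interface: ASAN_POISON_MEMORY_REGION marks a byte range as off-limits, so any later instrumented load or store into it is reported, and ASAN_UNPOISON_MEMORY_REGION lifts that mark. The patch therefore keeps memory poisoned while the allocator owns it (freshly mapped pages, freed cells) and unpoisons exactly the bytes it hands out. The subtle case is rawDealloc: a freed cell is poisoned, yet the allocator must still write the free-list link f.next into it, which is what poisonGuard is for. The sketch below is illustrative only and not part of the patch; it emulates ASan's shadow memory with a HashSet and uses a simplified stand-in FreeCell, but the guard template has the same shape as the one added above.

import std/sets

# Mock of ASan's shadow memory: the set of currently poisoned byte addresses.
var shadow: HashSet[int]

proc poison(p: pointer, size: int) =
  for i in 0 ..< size: shadow.incl(cast[int](p) +% i)

proc unpoison(p: pointer, size: int) =
  for i in 0 ..< size: shadow.excl(cast[int](p) +% i)

template poisonGuard(p: pointer, size: int, body: untyped) =
  # Same shape as the template in the patch: lift the poison just long
  # enough for the allocator to touch its own metadata, then restore it.
  unpoison(p, size)
  body
  poison(p, size)

type FreeCell = object   # simplified stand-in for the allocator's FreeCell
  next: ptr FreeCell

when isMainModule:
  var cell: FreeCell
  var freeList: ptr FreeCell = nil

  # rawDealloc first poisons the whole freed cell ...
  poison(addr cell, sizeof(FreeCell))
  # ... then threads it onto the free list under the guard:
  poisonGuard(addr cell, sizeof(FreeCell)):
    cell.next = freeList
  freeList = addr cell
  doAssert cast[int](addr cell) in shadow  # poisoned again afterwards

In the patch itself all of this is compile-time gated: unless the program is built with -d:useAsan (and the C compiler is given -fsanitize=address so the ASan runtime and <sanitizer/asan_interface.h> are in play), the poison and unpoison templates expand to nothing and the allocator's behavior is unchanged.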