Commit 652707a

[nsan] Use sanitizer allocator

* The performance is better than that of the glibc allocator.
* Allocator interface functions, sanitizer allocator options, and
  MallocHooks/FreeHooks are supported.
* Shadow memory has specific memory layout requirements, and using the libc
  allocator could lead to conflicts with that layout.
* If we later add an mmap interceptor for reliability (a VMA could reuse a
  previously released VMA that is still poisoned), glibc may invoke an
  internal system call to perform munmap, which cannot be intercepted, so we
  would be unable to return the shadow memory to the OS. This mirrors dfsan:
  https://reviews.llvm.org/D101204

Also intercept operator new/delete, as other sanitizers that use the
sanitizer allocator do. The align_val_t overload of operator new has slightly
less overhead. Pull Request: #102764
1 parent 91c3a71 commit 652707a

15 files changed: +759 / -44 lines
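Because the sanitizer allocator runs RunMallocHooks/RunFreeHooks (see
nsan_allocator.cpp below), programs can now observe nsan heap events through
the public sanitizer interface. A minimal sketch, assuming a Linux target
built with nsan; the hook bodies are illustrative only:

  #include <sanitizer/allocator_interface.h>
  #include <cstdio>
  #include <cstdlib>

  // Called by the runtime on every allocation/deallocation.
  static void MyMallocHook(const volatile void *ptr, size_t size) {
    fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
  }
  static void MyFreeHook(const volatile void *ptr) {
    fprintf(stderr, "free %p\n", ptr);
  }

  int main() {
    __sanitizer_install_malloc_and_free_hooks(MyMallocHook, MyFreeHook);
    void *p = malloc(16); // triggers MyMallocHook
    free(p);              // triggers MyFreeHook
  }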

compiler-rt/lib/nsan/CMakeLists.txt

Lines changed: 2 additions & 0 deletions

@@ -4,9 +4,11 @@ include_directories(..)

 set(NSAN_SOURCES
   nsan.cpp
+  nsan_allocator.cpp
   nsan_flags.cpp
   nsan_interceptors.cpp
   nsan_malloc_linux.cpp
+  nsan_new_delete.cpp
   nsan_stats.cpp
   nsan_suppressions.cpp
   nsan_thread.cpp

compiler-rt/lib/nsan/nsan.cpp

Lines changed: 3 additions & 1 deletion

@@ -807,18 +807,20 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __nsan_init() {
   if (nsan_initialized)
     return;
   nsan_init_is_running = true;
+  SanitizerToolName = "NumericalStabilitySanitizer";

   InitializeFlags();
   InitializeSuppressions();
   InitializePlatformEarly();

   DisableCoreDumperIfNecessary();

-  if (!MmapFixedNoReserve(TypesAddr(), UnusedAddr() - TypesAddr()))
+  if (!MmapFixedNoReserve(TypesAddr(), AllocatorAddr() - TypesAddr()))
     Die();

   InitializeInterceptors();
   NsanTSDInit(NsanTSDDtor);
+  NsanAllocatorInit();

   NsanThread *main_thread = NsanThread::Create(nullptr, nullptr);
   SetCurrentThread(main_thread);
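The remapped bound comes from the new allocator region: AllocatorAddr() is
not part of this excerpt, but it presumably marks the start of the heap range
that AP64 claims in nsan_allocator.cpp below. A hypothetical sketch of such
an accessor:

  // Hypothetical, not from the commit: the allocator region would begin at
  // the platform heap base used as AP64::kSpaceBeg in nsan_allocator.cpp.
  static uptr AllocatorAddr() { return Mapping::kHeapMemBeg; }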

compiler-rt/lib/nsan/nsan.h

Lines changed: 8 additions & 0 deletions

@@ -51,6 +51,14 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
 __nsan_default_options();
 }

+// Unwind the stack for fatal error, as the parameter `stack` is
+// empty without origins.
+#define GET_FATAL_STACK_TRACE_IF_EMPTY(STACK)                                 \
+  if (nsan_initialized && (STACK)->size == 0) {                               \
+    (STACK)->Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
+                    common_flags()->fast_unwind_on_fatal);                    \
+  }
+
 namespace __nsan {

 extern bool nsan_initialized;
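The macro is exercised on the fatal-error paths of the new allocator below
(NsanAllocate, NsanCalloc, and friends); the recurring pattern is:

  // Unwind only when the trace is still empty, then report fatally.
  BufferedStackTrace stack;
  GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
  ReportOutOfMemory(size, &stack);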
compiler-rt/lib/nsan/nsan_allocator.cpp

Lines changed: 340 additions & 0 deletions

@@ -0,0 +1,340 @@
//===- nsan_allocator.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// NumericalStabilitySanitizer allocator.
//
//===----------------------------------------------------------------------===//

#include "nsan_allocator.h"
#include "interception/interception.h"
#include "nsan.h"
#include "nsan_flags.h"
#include "nsan_platform.h"
#include "nsan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"

using namespace __nsan;

DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
DECLARE_REAL(void *, memset, void *dest, int c, uptr n)

namespace {
struct Metadata {
  uptr requested_size;
};

struct NsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {}
};

const uptr kMaxAllowedMallocSize = 1ULL << 40;

// Allocator64 parameters. Deliberately using a short name.
struct AP64 {
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = 0x40000000000; // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = NsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
} // namespace

using PrimaryAllocator = SizeClassAllocator64<AP64>;
using Allocator = CombinedAllocator<PrimaryAllocator>;
using AllocatorCache = Allocator::AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

void __nsan::NsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

static AllocatorCache *GetAllocatorCache(NsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void NsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void NsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}

static void *NsanAllocate(uptr size, uptr alignment, bool zero) {
  if (UNLIKELY(size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: NumericalStabilitySanitizer failed to allocate 0x%zx "
             "bytes\n",
             size);
      return nullptr;
    }
    BufferedStackTrace stack;
    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
    ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
    ReportRssLimitExceeded(&stack);
  }

  void *allocated;
  if (NsanThread *t = GetCurrentThread()) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
    ReportOutOfMemory(size, &stack);
  }
  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zero && allocator.FromPrimary(allocated))
    REAL(memset)(allocated, 0, size);
  __nsan_set_value_unknown(allocated, size);
  RunMallocHooks(allocated, size);
  return allocated;
}

void __nsan::NsanDeallocate(void *p) {
  DCHECK(p);
  RunFreeHooks(p);
  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  if (flags().poison_in_free)
    __nsan_set_value_unknown(p, size);
  if (NsanThread *t = GetCurrentThread()) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    // In a just created thread, glibc's _dl_deallocate_tls might reach here
    // before nsan_current_thread is set.
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

static void *NsanReallocate(void *ptr, uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(ptr));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(ptr);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size)
      __nsan_set_value_unknown((u8 *)ptr + old_size, new_size - old_size);
    return ptr;
  }
  void *new_p = NsanAllocate(new_size, alignment, false);
  if (new_p) {
    uptr memcpy_size = Min(new_size, old_size);
    REAL(memcpy)(new_p, ptr, memcpy_size);
    __nsan_copy_values(new_p, ptr, memcpy_size);
    NsanDeallocate(ptr);
  }
  return new_p;
}

static void *NsanCalloc(uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
    ReportCallocOverflow(nmemb, size, &stack);
  }
  return NsanAllocate(nmemb * size, sizeof(u64), true);
}

static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  auto *b = reinterpret_cast<Metadata *>(allocator.GetMetaData(beg));
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return beg;
}

static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  if (allocator.GetBlockBegin(p) != p)
    return 0;
  return AllocationSizeFast(p);
}

void *__nsan::nsan_malloc(uptr size) {
  return SetErrnoOnNull(NsanAllocate(size, sizeof(u64), false));
}

void *__nsan::nsan_calloc(uptr nmemb, uptr size) {
  return SetErrnoOnNull(NsanCalloc(nmemb, size));
}

void *__nsan::nsan_realloc(void *ptr, uptr size) {
  if (!ptr)
    return SetErrnoOnNull(NsanAllocate(size, sizeof(u64), false));
  if (size == 0) {
    NsanDeallocate(ptr);
    return nullptr;
  }
  return SetErrnoOnNull(NsanReallocate(ptr, size, sizeof(u64)));
}

void *__nsan::nsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return nsan_realloc(ptr, nmemb * size);
}

void *__nsan::nsan_valloc(uptr size) {
  return SetErrnoOnNull(NsanAllocate(size, GetPageSizeCached(), false));
}

void *__nsan::nsan_pvalloc(uptr size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(NsanAllocate(size, PageSize, false));
}

void *__nsan::nsan_aligned_alloc(uptr alignment, uptr size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(NsanAllocate(size, alignment, false));
}

void *__nsan::nsan_memalign(uptr alignment, uptr size) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    BufferedStackTrace stack;
    GET_FATAL_STACK_TRACE_IF_EMPTY(&stack);
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(NsanAllocate(size, alignment, false));
}

int __nsan::nsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    BufferedStackTrace stack;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = NsanAllocate(size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by NsanAllocate.
    return errno_ENOMEM;
  DCHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
}
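With the sanitizer allocator in place, the allocator-introspection entry
points defined in the extern "C" block above behave as in other sanitizers.
A minimal usage sketch, assuming a program built with -fsanitize=numerical:

  #include <sanitizer/allocator_interface.h>
  #include <cassert>
  #include <cstdlib>

  int main() {
    void *p = malloc(100);
    // The nsan runtime owns the block and tracks its requested size.
    assert(__sanitizer_get_ownership(p));
    assert(__sanitizer_get_allocated_size(p) == 100);
    assert(__sanitizer_get_allocated_begin(p) == p);
    free(p);
  }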
