Commit 7b5e019

Author: ilezhankin

Fix returning pointer for zero-sized allocation outside of page

The problematic scenario looks like this:

- Allocate a new page on the arena.
- Hand out the pointer for the last piece of the page, so that `offset == size`.
- Try to allocate a zero-sized segment: since `offset + 0 <= size`, we return the pointer `page + offset`.
- `GetStartOfPage(page + offset)` returns the next page, so any access through that pointer touches the next (probably unmapped) page.

Now we never physically allocate a zero-sized region; instead we return a pointer to an aligned stub object.

commit_hash:5fa77d6bd78c7f712f35da943fcfe9023f78ec5e

1 parent e2889c9 commit 7b5e019
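To make the failure concrete, here is a minimal self-contained sketch of the broken arithmetic. `Page`, `Allocate`, and `GetStartOfPage` are illustrative stand-ins, not the real `TMkqlArrowHeader`/arena API, and the 4 KiB page size is an assumption:

// Illustrative sketch of the bug, NOT the real allocator code.
#include <cstdint>

constexpr uint64_t PageSize = 4096; // assumed; pages are page-aligned

struct Page {
    uint64_t Offset; // bytes already handed out
    uint64_t Size;   // bytes available in the page
};

uint8_t* Allocate(Page* page, uint64_t size) {
    if (page->Offset + size <= page->Size) { // passes for size == 0 even when Offset == Size
        uint8_t* ptr = reinterpret_cast<uint8_t*>(page) + page->Offset;
        page->Offset += size;
        return ptr; // for size == 0 and Offset == Size: one-past-the-end of the page
    }
    return nullptr; // page exhausted; the real code switches to a fresh page
}

Page* GetStartOfPage(const void* ptr) {
    // Rounds down to a page boundary; a one-past-the-end pointer rounds
    // down to the *next* page, which may be unmapped or unrelated.
    return reinterpret_cast<Page*>(reinterpret_cast<uintptr_t>(ptr) & ~uintptr_t(PageSize - 1));
}

With `Offset == Size`, a zero-sized request yields `page + PageSize`, and any bookkeeping that later calls `GetStartOfPage` on that pointer reads a page header the arena never allocated.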

2 files changed, +91 -31 lines

yql/essentials/minikql/mkql_alloc.cpp

Lines changed: 46 additions & 26 deletions

@@ -7,9 +7,9 @@
 
 #include <tuple>
 
-namespace NKikimr {
+namespace NKikimr::NMiniKQL {
 
-namespace NMiniKQL {
+static ui8 ZeroSizeObject alignas(ArrowAlignment)[0];
 
 constexpr ui64 ArrowSizeForArena = (TAllocState::POOL_PAGE_SIZE >> 2);
 
@@ -268,7 +268,14 @@ void TPagedArena::Clear() noexcept {
     }
 }
 
+namespace {
+
 void* MKQLArrowAllocateOnArena(ui64 size) {
+    Y_ENSURE(size);
+    // If size is zero we can get in trouble: when `page->Offset == page->Size`.
+    // The zero size leads to return `ptr` just after the current page.
+    // Then getting start of page for such pointer returns next page - which may be unmapped or unrelevant to `ptr`.
+
     TAllocState* state = TlsAllocState;
     Y_ENSURE(state);
 
@@ -287,8 +294,8 @@ void* MKQLArrowAllocateOnArena(ui64 size) {
 
         page = (TMkqlArrowHeader*)GetAlignedPage();
         NYql::NUdf::SanitizerMakeRegionAccessible(page, sizeof(TMkqlArrowHeader));
-        page->Offset = sizeof(TMkqlArrowHeader);
-        page->Size = pageSize;
+        page->Offset = 0;
+        page->Size = pageSize - sizeof(TMkqlArrowHeader); // for consistency with CleanupArrowList()
         page->UseCount = 1;
 
         if (state->EnableArrowTracking) {
@@ -299,14 +306,20 @@ void* MKQLArrowAllocateOnArena(ui64 size) {
         }
     }
 
-    void* ptr = (ui8*)page + page->Offset;
+    void* ptr = (ui8*)page + page->Offset + sizeof(TMkqlArrowHeader);
    page->Offset += alignedSize;
     ++page->UseCount;
+
+    Y_DEBUG_ABORT_UNLESS(TAllocState::GetPageStart(ptr) == page);
+
     return ptr;
 }
 
-namespace {
 void* MKQLArrowAllocateImpl(ui64 size) {
+    if (Y_UNLIKELY(size == 0)) {
+        return reinterpret_cast<void*>(ZeroSizeObject);
+    }
+
     if (Y_LIKELY(!TAllocState::IsDefaultAllocatorUsed())) {
         if (size <= ArrowSizeForArena) {
             return MKQLArrowAllocateOnArena(size);
@@ -345,28 +358,14 @@ void* MKQLArrowAllocateImpl(ui64 size) {
     header->Size = size;
     return header + 1;
 }
-} // namespace
-
-void* MKQLArrowAllocate(ui64 size) {
-    auto sizeWithRedzones = NYql::NUdf::GetSizeToAlloc(size);
-    void* mem = MKQLArrowAllocateImpl(sizeWithRedzones);
-    return NYql::NUdf::WrapPointerWithRedZones(mem, sizeWithRedzones);
-}
-
-void* MKQLArrowReallocate(const void* mem, ui64 prevSize, ui64 size) {
-    auto res = MKQLArrowAllocate(size);
-    memcpy(res, mem, Min(prevSize, size));
-    MKQLArrowFree(mem, prevSize);
-    return res;
-}
 
 void MKQLArrowFreeOnArena(const void* ptr) {
     auto* page = (TMkqlArrowHeader*)TAllocState::GetPageStart(ptr);
     if (page->UseCount.fetch_sub(1) == 1) {
         if (!page->Entry.IsUnlinked()) {
             TAllocState* state = TlsAllocState;
             Y_ENSURE(state);
-            state->OffloadFree(page->Size);
+            state->OffloadFree(page->Size + sizeof(TMkqlArrowHeader));
             page->Entry.Unlink();
 
             auto it = state->ArrowBuffers.find(page);
@@ -380,8 +379,12 @@ void MKQLArrowFreeOnArena(const void* ptr) {
     return;
 }
 
-namespace {
 void MKQLArrowFreeImpl(const void* mem, ui64 size) {
+    if (Y_UNLIKELY(mem == reinterpret_cast<const void*>(ZeroSizeObject))) {
+        Y_DEBUG_ABORT_UNLESS(size == 0);
+        return;
+    }
+
     if (Y_LIKELY(!TAllocState::IsDefaultAllocatorUsed())) {
         if (size <= ArrowSizeForArena) {
             return MKQLArrowFreeOnArena(mem);
@@ -409,15 +412,34 @@ void MKQLArrowFreeImpl(const void* mem, ui64 size) {
 
     ReleaseAlignedPage(header, fullSize);
 }
+
 } // namespace
 
+void* MKQLArrowAllocate(ui64 size) {
+    auto sizeWithRedzones = NYql::NUdf::GetSizeToAlloc(size);
+    void* mem = MKQLArrowAllocateImpl(sizeWithRedzones);
+    return NYql::NUdf::WrapPointerWithRedZones(mem, sizeWithRedzones);
+}
+
+void* MKQLArrowReallocate(const void* mem, ui64 prevSize, ui64 size) {
+    auto res = MKQLArrowAllocate(size);
+    memcpy(res, mem, Min(prevSize, size));
+    MKQLArrowFree(mem, prevSize);
+    return res;
+}
+
 void MKQLArrowFree(const void* mem, ui64 size) {
     mem = NYql::NUdf::UnwrapPointerWithRedZones(mem, size);
     auto sizeWithRedzones = NYql::NUdf::GetSizeToAlloc(size);
     return MKQLArrowFreeImpl(mem, sizeWithRedzones);
 }
 
 void MKQLArrowUntrack(const void* mem, ui64 size) {
+    if (Y_UNLIKELY(mem == reinterpret_cast<const void*>(ZeroSizeObject))) {
+        Y_DEBUG_ABORT_UNLESS(size == 0);
+        return;
+    }
+
     mem = NYql::NUdf::GetOriginalAllocatedObject(mem, size);
     TAllocState* state = TlsAllocState;
     Y_ENSURE(state);
@@ -437,7 +459,7 @@ void MKQLArrowUntrack(const void* mem, ui64 size) {
             if (!page->Entry.IsUnlinked()) {
                 page->Entry.Unlink(); // unlink page immediately so we don't accidentally free untracked memory within `TAllocState`
                 state->ArrowBuffers.erase(it);
-                state->OffloadFree(page->Size);
+                state->OffloadFree(page->Size + sizeof(TMkqlArrowHeader));
            }
 
             return;
@@ -459,6 +481,4 @@ void MKQLArrowUntrack(const void* mem, ui64 size) {
     }
 }
 
-} // NMiniKQL
-
-} // NKikimr
+} // namespace NKikimr::NMiniKQL
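The diff above fixes this on two fronts: zero-sized requests short-circuit to the static `ZeroSizeObject` stub before ever reaching the arena, and `page->Offset` now counts bytes past the header (with `page->Size` shrunk accordingly), so every pointer handed out stays strictly inside its page. A rough sketch of the resulting invariant, using the same illustrative types and an assumed alignment rather than the real ones:

// Illustrative sketch of the fixed scheme; see MKQLArrowAllocateImpl /
// MKQLArrowAllocateOnArena in the diff above for the real code.
#include <cassert>
#include <cstdint>

constexpr uint64_t PageSize = 4096; // assumed; pages are page-aligned

struct Header {
    uint64_t Offset; // bytes handed out past the header
    uint64_t Size;   // usable bytes: PageSize - sizeof(Header)
};

alignas(16) static uint8_t ZeroSizeStub[1]; // stand-in for ZeroSizeObject

uint8_t* AllocateFixed(Header* page, uint64_t size) {
    if (size == 0) {
        return ZeroSizeStub; // never touches an arena page at all
    }
    if (page->Offset + size <= page->Size) {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(page) + sizeof(Header) + page->Offset;
        page->Offset += size;
        // size > 0 and Offset + size <= PageSize - sizeof(Header) imply
        // ptr < page + PageSize, so rounding ptr down yields this page again.
        assert((reinterpret_cast<uintptr_t>(ptr) & ~uintptr_t(PageSize - 1)) ==
               reinterpret_cast<uintptr_t>(page));
        return ptr;
    }
    return nullptr; // page exhausted; the real code switches to a fresh page
}

On the free side, pointers equal to the stub are recognized and ignored, which the diff implements in `MKQLArrowFreeImpl` and `MKQLArrowUntrack`.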

yql/essentials/minikql/mkql_alloc_ut.cpp

Lines changed: 45 additions & 5 deletions

@@ -2,8 +2,7 @@
 
 #include <library/cpp/testing/unittest/registar.h>
 
-namespace NKikimr {
-namespace NMiniKQL {
+namespace NKikimr::NMiniKQL {
 
 Y_UNIT_TEST_SUITE(TMiniKQLAllocTest) {
     Y_UNIT_TEST(TestPagedArena) {
@@ -73,7 +72,7 @@ Y_UNIT_TEST_SUITE(TMiniKQLAllocTest) {
         }
         TWithDefaultMiniKQLAlloc::FreeWithSize(p2, 10);
     }
-    Y_UNIT_TEST(InitiallyAcqured) {
+    Y_UNIT_TEST(InitiallyAcquired) {
         {
             TScopedAlloc alloc(__LOCATION__);
             UNIT_ASSERT_VALUES_EQUAL(true, alloc.IsAttached());
@@ -93,7 +92,48 @@ Y_UNIT_TEST_SUITE(TMiniKQLAllocTest) {
             UNIT_ASSERT_VALUES_EQUAL(false, alloc.IsAttached());
         }
     }
-}
+    Y_UNIT_TEST(ArrowAllocateZeroSize) {
+        // Choose small enough pieces to hit arena (using some internal knowledge)
+        const auto pieceSize = AlignUp<size_t>(1, ArrowAlignment);
+        UNIT_ASSERT_EQUAL(0, (TAllocState::POOL_PAGE_SIZE - sizeof(TMkqlArrowHeader)) % pieceSize);
+        const auto pieceCount = (TAllocState::POOL_PAGE_SIZE - sizeof(TMkqlArrowHeader)) / pieceSize;
+        void** ptrs = new void*[pieceCount];
 
+        // Populate the current page on arena to maximum offset
+        TScopedAlloc alloc(__LOCATION__);
+        for (auto i = 0ul; i < pieceCount; ++i) {
+            ptrs[i] = MKQLArrowAllocate(pieceSize);
+        }
+
+        // Check all pieces are on the same page
+        void* pageStart = TAllocState::GetPageStart(ptrs[0]);
+        for (auto i = 1ul; i < pieceCount; ++i) {
+            UNIT_ASSERT_VALUES_EQUAL(pageStart, TAllocState::GetPageStart(ptrs[i]));
+        }
+
+        // Allocate zero-sized piece twice and check it's the same address
+        void* ptrZero1 = MKQLArrowAllocate(0);
+        void* ptrZero2 = MKQLArrowAllocate(0);
+        UNIT_ASSERT_VALUES_EQUAL(ptrZero1, ptrZero2);
+
+        // Allocate one more small piece and check that it's on another page - different from zero-sized piece
+        void* ptrOne = MKQLArrowAllocate(1);
+        UNIT_ASSERT_VALUES_UNEQUAL(pageStart, TAllocState::GetPageStart(ptrOne));
+        UNIT_ASSERT_VALUES_UNEQUAL(TAllocState::GetPageStart(ptrZero1), TAllocState::GetPageStart(ptrOne));
+
+        // Untrack zero-sized piece
+        MKQLArrowUntrack(ptrZero1, 0);
+
+        // Deallocate all the stuff
+        for (auto i = 0ul; i < pieceCount; ++i) {
+            MKQLArrowFree(ptrs[i], pieceSize);
+        }
+        MKQLArrowFree(ptrZero1, 0);
+        MKQLArrowFree(ptrZero2, 0);
+        MKQLArrowFree(ptrOne, 1);
+
+        delete[] ptrs;
+    }
 }
-}
+
+} // namespace NKikimr::NMiniKQL
