 
 #include <tuple>
 
-namespace NKikimr {
+namespace NKikimr::NMiniKQL {
 
-namespace NMiniKQL {
+static ui8 ZeroSizeObject alignas(ArrowAlignment)[0];
 
 constexpr ui64 ArrowSizeForArena = (TAllocState::POOL_PAGE_SIZE >> 2);
 
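The ZeroSizeObject declared above is used later in this diff as a sentinel address handed out for zero-byte allocations. A minimal standalone sketch of the same pattern, with illustrative names and a 1-byte array standing in for the zero-length-array extension (nothing below is taken from the PR itself):

#include <cassert>
#include <cstddef>
#include <cstdlib>

// Sentinel object whose address is returned for zero-byte requests.
// Its address is distinct from anything the real allocator can hand out.
alignas(std::max_align_t) static unsigned char ZeroSizeSentinel[1];

void* AllocateMaybeZero(std::size_t size) {
    if (size == 0) {
        return ZeroSizeSentinel;   // never touches the real allocator
    }
    return std::malloc(size);
}

void FreeMaybeZero(void* ptr, std::size_t size) {
    if (ptr == ZeroSizeSentinel) {
        assert(size == 0);         // mirrors the Y_DEBUG_ABORT_UNLESS(size == 0) checks
        return;                    // nothing was actually allocated
    }
    std::free(ptr);
}

int main() {
    void* p = AllocateMaybeZero(0);
    assert(p != nullptr);          // callers still get a usable, non-null pointer
    FreeMaybeZero(p, 0);
}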
@@ -268,7 +268,14 @@ void TPagedArena::Clear() noexcept {
     }
 }
 
+namespace {
+
 void* MKQLArrowAllocateOnArena(ui64 size) {
+    Y_ENSURE(size);
+    // If size is zero we can get in trouble when `page->Offset == page->Size`:
+    // a zero size makes us return `ptr` just past the current page, and taking
+    // the page start of such a pointer yields the next page, which may be unmapped or unrelated to `ptr`.
+
     TAllocState* state = TlsAllocState;
     Y_ENSURE(state);
 
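The hazard described in the new comment can be made concrete with a few lines of pointer arithmetic. Assuming arena pages sit on POOL_PAGE_SIZE boundaries and the page start is recovered by rounding the address down to that boundary (an assumption about GetPageStart, not shown in this diff), a zero-size bump on a full page produces a one-past-the-end pointer that resolves to the next page:

#include <cassert>
#include <cstdint>

int main() {
    const std::uintptr_t PageSize = 64 * 1024;      // illustrative page size only
    const std::uintptr_t page     = 10 * PageSize;  // some page-aligned address
    const std::uintptr_t offset   = PageSize;       // page is completely full: Offset == Size

    // Bumping by zero returns an address one past the end of the page.
    const std::uintptr_t ptr = page + offset + 0;

    // Rounding down to the page boundary no longer yields the original page.
    const std::uintptr_t recoveredPage = ptr & ~(PageSize - 1);
    assert(recoveredPage == page + PageSize);       // the *next* page, possibly unmapped
    return 0;
}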
@@ -287,8 +294,8 @@ void* MKQLArrowAllocateOnArena(ui64 size) {
 
         page = (TMkqlArrowHeader*)GetAlignedPage();
         NYql::NUdf::SanitizerMakeRegionAccessible(page, sizeof(TMkqlArrowHeader));
-        page->Offset = sizeof(TMkqlArrowHeader);
-        page->Size = pageSize;
+        page->Offset = 0;
+        page->Size = pageSize - sizeof(TMkqlArrowHeader); // for consistency with CleanupArrowList()
         page->UseCount = 1;
 
         if (state->EnableArrowTracking) {
@@ -299,14 +306,20 @@ void* MKQLArrowAllocateOnArena(ui64 size) {
         }
     }
 
-    void* ptr = (ui8*)page + page->Offset;
+    void* ptr = (ui8*)page + page->Offset + sizeof(TMkqlArrowHeader);
     page->Offset += alignedSize;
     ++page->UseCount;
+
+    Y_DEBUG_ABORT_UNLESS(TAllocState::GetPageStart(ptr) == page);
+
     return ptr;
 }
 
-namespace {
 void* MKQLArrowAllocateImpl(ui64 size) {
+    if (Y_UNLIKELY(size == 0)) {
+        return reinterpret_cast<void*>(ZeroSizeObject);
+    }
+
     if (Y_LIKELY(!TAllocState::IsDefaultAllocatorUsed())) {
         if (size <= ArrowSizeForArena) {
             return MKQLArrowAllocateOnArena(size);
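After the two hunks above, Offset counts bytes within the payload that follows the header (instead of from the page start) and Size is the payload capacity, so the bump allocation behaves roughly like the standalone sketch below. Names, the fixed page size, and the simplified structure are assumptions for illustration; the real code also handles page rollover and tracking:

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

constexpr std::size_t PageSize = 64 * 1024;   // stands in for TAllocState::POOL_PAGE_SIZE

struct Header {                               // stands in for TMkqlArrowHeader
    std::size_t Offset;                       // bytes already handed out, payload-relative
    std::size_t Size;                         // payload capacity: PageSize - sizeof(Header)
    std::size_t UseCount;
};

// Bump-allocates from the payload area that follows the header.
void* AllocateOnPage(Header* page, std::size_t alignedSize) {
    assert(alignedSize != 0);                            // the Y_ENSURE(size) guard
    assert(page->Offset + alignedSize <= page->Size);    // request fits in this page
    void* ptr = reinterpret_cast<unsigned char*>(page) + sizeof(Header) + page->Offset;
    page->Offset += alignedSize;
    ++page->UseCount;
    // Every returned pointer stays strictly inside the page, so rounding it down
    // to the page boundary recovers `page` (the Y_DEBUG_ABORT_UNLESS invariant).
    assert((reinterpret_cast<std::uintptr_t>(ptr) & ~static_cast<std::uintptr_t>(PageSize - 1)) ==
           reinterpret_cast<std::uintptr_t>(page));
    return ptr;
}

int main() {
    void* raw = std::aligned_alloc(PageSize, PageSize);  // page-aligned backing memory
    assert(raw);
    auto* page = new (raw) Header{0, PageSize - sizeof(Header), 0};
    AllocateOnPage(page, 16);
    AllocateOnPage(page, 32);
    std::free(raw);
}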
@@ -345,28 +358,14 @@ void* MKQLArrowAllocateImpl(ui64 size) {
     header->Size = size;
     return header + 1;
 }
-} // namespace
-
-void* MKQLArrowAllocate(ui64 size) {
-    auto sizeWithRedzones = NYql::NUdf::GetSizeToAlloc(size);
-    void* mem = MKQLArrowAllocateImpl(sizeWithRedzones);
-    return NYql::NUdf::WrapPointerWithRedZones(mem, sizeWithRedzones);
-}
-
-void* MKQLArrowReallocate(const void* mem, ui64 prevSize, ui64 size) {
-    auto res = MKQLArrowAllocate(size);
-    memcpy(res, mem, Min(prevSize, size));
-    MKQLArrowFree(mem, prevSize);
-    return res;
-}
 
 void MKQLArrowFreeOnArena(const void* ptr) {
     auto* page = (TMkqlArrowHeader*)TAllocState::GetPageStart(ptr);
     if (page->UseCount.fetch_sub(1) == 1) {
         if (!page->Entry.IsUnlinked()) {
             TAllocState* state = TlsAllocState;
             Y_ENSURE(state);
-            state->OffloadFree(page->Size);
+            state->OffloadFree(page->Size + sizeof(TMkqlArrowHeader));
             page->Entry.Unlink();
 
             auto it = state->ArrowBuffers.find(page);
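The OffloadFree changes (here and in MKQLArrowUntrack further down) follow from the new meaning of page->Size. Assuming the page was charged at its full POOL_PAGE_SIZE when it was allocated (that call is not visible in this excerpt), the amount returned on free must include the header again; the concrete sizes below are placeholders, only the relationship matters:

// Illustration with assumed sizes.
constexpr unsigned long long PoolPageSize = 64 * 1024;                   // charged per page on allocation
constexpr unsigned long long HeaderSize   = 64;                          // stand-in for sizeof(TMkqlArrowHeader)
constexpr unsigned long long PayloadSize  = PoolPageSize - HeaderSize;   // page->Size after this change
static_assert(PayloadSize + HeaderSize == PoolPageSize,
              "OffloadFree(page->Size + sizeof(TMkqlArrowHeader)) returns exactly what was charged");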
@@ -380,8 +379,12 @@ void MKQLArrowFreeOnArena(const void* ptr) {
     return;
 }
 
-namespace {
 void MKQLArrowFreeImpl(const void* mem, ui64 size) {
+    if (Y_UNLIKELY(mem == reinterpret_cast<const void*>(ZeroSizeObject))) {
+        Y_DEBUG_ABORT_UNLESS(size == 0);
+        return;
+    }
+
     if (Y_LIKELY(!TAllocState::IsDefaultAllocatorUsed())) {
         if (size <= ArrowSizeForArena) {
             return MKQLArrowFreeOnArena(mem);
@@ -409,15 +412,34 @@ void MKQLArrowFreeImpl(const void* mem, ui64 size) {
 
     ReleaseAlignedPage(header, fullSize);
 }
+
 } // namespace
 
+void* MKQLArrowAllocate(ui64 size) {
+    auto sizeWithRedzones = NYql::NUdf::GetSizeToAlloc(size);
+    void* mem = MKQLArrowAllocateImpl(sizeWithRedzones);
+    return NYql::NUdf::WrapPointerWithRedZones(mem, sizeWithRedzones);
+}
+
+void* MKQLArrowReallocate(const void* mem, ui64 prevSize, ui64 size) {
+    auto res = MKQLArrowAllocate(size);
+    memcpy(res, mem, Min(prevSize, size));
+    MKQLArrowFree(mem, prevSize);
+    return res;
+}
+
 void MKQLArrowFree(const void* mem, ui64 size) {
     mem = NYql::NUdf::UnwrapPointerWithRedZones(mem, size);
     auto sizeWithRedzones = NYql::NUdf::GetSizeToAlloc(size);
     return MKQLArrowFreeImpl(mem, sizeWithRedzones);
 }
 
 void MKQLArrowUntrack(const void* mem, ui64 size) {
+    if (Y_UNLIKELY(mem == reinterpret_cast<const void*>(ZeroSizeObject))) {
+        Y_DEBUG_ABORT_UNLESS(size == 0);
+        return;
+    }
+
     mem = NYql::NUdf::GetOriginalAllocatedObject(mem, size);
     TAllocState* state = TlsAllocState;
     Y_ENSURE(state);
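The relocated MKQLArrowAllocate/MKQLArrowFree above remain thin wrappers whose job is the red-zone bookkeeping around the Impl functions. A generic sketch of that wrap/unwrap round trip is shown below with made-up names and a fixed guard size, and without the poisoning a real sanitizer build would do; the actual NYql::NUdf helpers may differ in detail:

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

constexpr std::size_t RedZone = 16;                      // assumed guard size on each side

std::size_t SizeToAlloc(std::size_t size) { return size + 2 * RedZone; }

// Shifts the raw allocation so the caller only sees the payload between the guards.
void* WrapWithRedZones(void* raw) {
    return static_cast<unsigned char*>(raw) + RedZone;
}

// Recovers the original allocation from a wrapped pointer.
void* UnwrapRedZones(void* wrapped) {
    return static_cast<unsigned char*>(wrapped) - RedZone;
}

void* Allocate(std::size_t size) {
    void* raw = std::malloc(SizeToAlloc(size));          // stands in for MKQLArrowAllocateImpl
    return WrapWithRedZones(raw);
}

void Free(void* mem) {
    std::free(UnwrapRedZones(mem));                      // stands in for MKQLArrowFreeImpl
}

int main() {
    void* p = Allocate(32);
    std::memset(p, 0, 32);                               // the payload itself is fully usable
    Free(p);
}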
@@ -437,7 +459,7 @@ void MKQLArrowUntrack(const void* mem, ui64 size) {
             if (!page->Entry.IsUnlinked()) {
                 page->Entry.Unlink(); // unlink page immediately so we don't accidentally free untracked memory within `TAllocState`
                 state->ArrowBuffers.erase(it);
-                state->OffloadFree(page->Size);
+                state->OffloadFree(page->Size + sizeof(TMkqlArrowHeader));
             }
 
             return;
@@ -459,6 +481,4 @@ void MKQLArrowUntrack(const void* mem, ui64 size) {
     }
 }
 
-} // NMiniKQL
-
-} // NKikimr
+} // namespace NKikimr::NMiniKQL