@@ -402,9 +402,60 @@ void TTableLocks::RemoveWriteLock(TLockInfo* lock) {
 
 // TLockLocker
 
+namespace {
+
+static constexpr ui64 DefaultLockLimit() {
+    // Valgrind and sanitizers are too slow
+    // Some tests cannot exhaust default limit in under 5 minutes
+    return NValgrind::PlainOrUnderValgrind(
+        NSan::PlainOrUnderSanitizer(
+            20000,
+            1000),
+        1000);
+}
+
+static constexpr ui64 DefaultLockRangesLimit() {
+    return 50000;
+}
+
+static constexpr ui64 DefaultTotalRangesLimit() {
+    return 1000000;
+}
+
+static std::atomic<ui64> g_LockLimit{ DefaultLockLimit() };
+static std::atomic<ui64> g_LockRangesLimit{ DefaultLockRangesLimit() };
+static std::atomic<ui64> g_TotalRangesLimit{ DefaultTotalRangesLimit() };
+
+} // namespace
+
+ui64 TLockLocker::LockLimit() {
+    return g_LockLimit.load(std::memory_order_relaxed);
+}
+
+ui64 TLockLocker::LockRangesLimit() {
+    return g_LockRangesLimit.load(std::memory_order_relaxed);
+}
+
+ui64 TLockLocker::TotalRangesLimit() {
+    return g_TotalRangesLimit.load(std::memory_order_relaxed);
+}
+
+void TLockLocker::SetLockLimit(ui64 newLimit) {
+    g_LockLimit.store(newLimit, std::memory_order_relaxed);
+}
+
+void TLockLocker::SetLockRangesLimit(ui64 newLimit) {
+    g_LockRangesLimit.store(newLimit, std::memory_order_relaxed);
+}
+
+void TLockLocker::SetTotalRangesLimit(ui64 newLimit) {
+    g_TotalRangesLimit.store(newLimit, std::memory_order_relaxed);
+}
+
 void TLockLocker::AddPointLock(const TLockInfo::TPtr& lock, const TPointKey& key) {
     if (lock->AddPoint(key)) {
         key.Table->AddPointLock(key, lock.Get());
+        LocksWithRanges.PushBack(lock.Get());
     } else {
         key.Table->AddShardLock(lock.Get());
     }
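
The three limits are now process-wide tunables read through relaxed atomics rather than hard-coded constants; relaxed ordering is sufficient because each limit is an independent scalar consulted at a single point. A minimal sketch of how a test might shrink them so the shard-lock fallback triggers after a handful of ranges (the setup helper below is hypothetical; the setters are the ones added above):

    // Hypothetical test helper: tighten the limits so eviction kicks in
    // after a few ranges instead of tens of thousands.
    void SetupTightLockLimits() {
        TLockLocker::SetLockLimit(100);
        TLockLocker::SetLockRangesLimit(10);
        TLockLocker::SetTotalRangesLimit(50);
    }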
@@ -413,21 +464,27 @@ void TLockLocker::AddPointLock(const TLockInfo::TPtr& lock, const TPointKey& key
 void TLockLocker::AddRangeLock(const TLockInfo::TPtr& lock, const TRangeKey& key) {
     if (lock->AddRange(key)) {
         key.Table->AddRangeLock(key, lock.Get());
+        LocksWithRanges.PushBack(lock.Get());
     } else {
         key.Table->AddShardLock(lock.Get());
     }
 }
 
-void TLockLocker::AddShardLock(const TLockInfo::TPtr& lock, TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) {
+void TLockLocker::MakeShardLock(TLockInfo* lock) {
     if (!lock->IsShardLock()) {
         for (const TPathId& tableId : lock->GetReadTables()) {
-            Tables.at(tableId)->RemoveRangeLock(lock.Get());
+            Tables.at(tableId)->RemoveRangeLock(lock);
         }
         lock->MakeShardLock();
+        LocksWithRanges.Remove(lock);
         for (const TPathId& tableId : lock->GetReadTables()) {
-            Tables.at(tableId)->AddShardLock(lock.Get());
+            Tables.at(tableId)->AddShardLock(lock);
         }
     }
+}
+
+void TLockLocker::AddShardLock(const TLockInfo::TPtr& lock, TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) {
+    MakeShardLock(lock.Get());
     for (auto& table : readTables) {
         const TPathId& tableId = table.GetTableId();
         Y_ABORT_UNLESS(Tables.at(tableId).Get() == &table);
@@ -519,6 +576,9 @@ void TLockLocker::RemoveBrokenRanges() {
 TLockInfo::TPtr TLockLocker::GetOrAddLock(ui64 lockId, ui32 lockNodeId) {
     auto it = Locks.find(lockId);
     if (it != Locks.end()) {
+        if (it->second->IsInList<TLockInfoRangesListTag>()) {
+            LocksWithRanges.PushBack(it->second.Get());
+        }
         if (it->second->IsInList<TLockInfoExpireListTag>()) {
             ExpireQueue.PushBack(it->second.Get());
         }
@@ -591,6 +651,7 @@ void TLockLocker::RemoveOneLock(ui64 lockTxId, ILocksDb* db) {
         for (const TPathId& tableId : txLock->GetWriteTables()) {
             Tables.at(tableId)->RemoveWriteLock(txLock.Get());
         }
+        LocksWithRanges.Remove(txLock.Get());
         txLock->CleanupConflicts();
         Locks.erase(it);
 
@@ -634,6 +695,7 @@ void TLockLocker::RemoveSchema(const TPathId& tableId, ILocksDb* db) {
     Y_ABORT_UNLESS(Tables.empty());
     Locks.clear();
     ShardLocks.clear();
+    LocksWithRanges.Clear();
     ExpireQueue.Clear();
     BrokenLocks.Clear();
     BrokenPersistentLocks.Clear();
@@ -643,21 +705,41 @@ void TLockLocker::RemoveSchema(const TPathId& tableId, ILocksDb* db) {
     PendingSubscribeLocks.clear();
 }
 
-bool TLockLocker::ForceShardLock(const TPathId& tableId) const {
-    auto it = Tables.find(tableId);
-    if (it != Tables.end()) {
-        if (it->second->RangeCount() > LockLimit()) {
-            return true;
-        }
+bool TLockLocker::ForceShardLock(
+        const TLockInfo::TPtr& lock,
+        const TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables,
+        ui64 newRanges)
+{
+    if (lock->NumPoints() + lock->NumRanges() + newRanges > LockRangesLimit()) {
+        // Lock has too many ranges, will never fit in
+        return true;
     }
-    return false;
-}
 
-bool TLockLocker::ForceShardLock(const TIntrusiveList<TTableLocks, TTableLocksReadListTag>& readTables) const {
     for (auto& table : readTables) {
-        if (table.RangeCount() > LockLimit())
-            return true;
+        while (table.RangeCount() + newRanges > TotalRangesLimit()) {
+            if (LocksWithRanges.Empty()) {
+                // Too many new ranges (e.g. TotalRangesLimit < LockRangesLimit)
+                return true;
+            }
+
+            // Try to reduce the number of ranges until new ranges fit in
+            TLockInfo* next = LocksWithRanges.PopFront();
+            if (next == lock.Get()) {
+                bool wasLast = LocksWithRanges.Empty();
+                LocksWithRanges.PushBack(next);
+                if (wasLast) {
+                    return true;
+                }
+                // We want to handle the newest lock last
+                continue;
+            }
+
+            // Reduce the number of ranges by making the oldest lock into a shard lock
+            MakeShardLock(next);
+            Self->IncCounter(COUNTER_LOCKS_WHOLE_SHARD);
+        }
     }
+
     return false;
 }
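
To make the new eviction loop easier to follow, here is the same strategy as a standalone model: locks that own ranges sit in a FIFO, and the oldest one is demoted to a whole-shard lock until the incoming ranges fit under the total budget. This is an illustrative sketch only, using std::list and a plain struct in place of TIntrusiveList and TLockInfo, and a single range counter in place of per-table RangeCount():

    #include <cstdint>
    #include <list>

    struct TModelLock {
        uint64_t Ranges = 0;     // points + ranges currently held
        bool ShardLock = false;  // true once demoted to a whole-shard lock
    };

    // Returns true when `current` must itself become a shard lock.
    bool ForceShardLockModel(TModelLock* current,
                             std::list<TModelLock*>& locksWithRanges,
                             uint64_t& tableRanges, uint64_t newRanges,
                             uint64_t lockRangesLimit, uint64_t totalRangesLimit)
    {
        if (current->Ranges + newRanges > lockRangesLimit) {
            return true;  // this lock alone exceeds its per-lock budget
        }
        while (tableRanges + newRanges > totalRangesLimit) {
            if (locksWithRanges.empty()) {
                return true;  // nothing left to evict
            }
            TModelLock* oldest = locksWithRanges.front();
            locksWithRanges.pop_front();
            if (oldest == current) {
                // Re-queue the current lock; give up only if it was the last one.
                bool wasLast = locksWithRanges.empty();
                locksWithRanges.push_back(oldest);
                if (wasLast) {
                    return true;
                }
                continue;  // handle the newest lock last
            }
            // Demote the oldest lock: its ranges collapse into one shard lock.
            tableRanges -= oldest->Ranges;
            oldest->Ranges = 0;
            oldest->ShardLock = true;
        }
        return false;
    }

Each iteration either returns or removes one lock from the FIFO, so the loop terminates; in the real code the demotion additionally bumps COUNTER_LOCKS_WHOLE_SHARD, as shown above.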
@@ -771,8 +853,6 @@ TVector<TSysLocks::TLock> TSysLocks::ApplyLocks() {
         return TVector<TLock>();
     }
 
-    bool shardLock = Locker.ForceShardLock(Update->ReadTables);
-
     TLockInfo::TPtr lock;
     ui64 counter = TLock::ErrorNotSet;
 
@@ -791,6 +871,12 @@ TVector<TSysLocks::TLock> TSysLocks::ApplyLocks() {
     } else if (lock->IsBroken()) {
         counter = TLock::ErrorBroken;
     } else {
+        bool shardLock = (
+            lock->IsShardLock() ||
+            Locker.ForceShardLock(
+                lock,
+                Update->ReadTables,
+                Update->PointLocks.size() + Update->RangeLocks.size()));
        if (shardLock) {
             Locker.AddShardLock(lock, Update->ReadTables);
             Self->IncCounter(COUNTER_LOCKS_WHOLE_SHARD);
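
Note that the decision has moved from one up-front check per update (removed in the previous hunk) to a per-lock check taken only on the healthy path: a lock that is already a shard lock stays one, and the prospective point/range count is passed in so ForceShardLock can evict before anything is inserted. As a sketch, the condition amounts to a TSysLocks helper like this (the wrapper name is made up; the calls and fields are the ones used in this hunk):

    // Hypothetical wrapper naming the new per-lock condition.
    bool TSysLocks::ShouldDegradeToShardLock(const TLockInfo::TPtr& lock) {
        ui64 newRanges = Update->PointLocks.size() + Update->RangeLocks.size();
        return lock->IsShardLock()
            || Locker.ForceShardLock(lock, Update->ReadTables, newRanges);
    }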