@@ -2,6 +2,7 @@
 #include "datashard_ut_common_kqp.h"
 #include "datashard_ut_read_table.h"

+#include <ydb/core/testlib/actors/block_events.h>
 #include <ydb/library/actors/core/mon.h>

 namespace NKikimr {
@@ -720,6 +721,71 @@ Y_UNIT_TEST_SUITE(DataShardFollowers) {
         UNIT_ASSERT_EQUAL(readDataPages, 3);
     }

+    Y_UNIT_TEST(FollowerReadDuringSplit) {
+        TPortManager pm;
+        TServerSettings serverSettings(pm.GetPort(2134));
+        serverSettings.SetDomainName("Root")
+            .SetUseRealThreads(false)
+            .SetEnableForceFollowers(true);
+
+        Tests::TServer::TPtr server = new TServer(serverSettings);
+        auto &runtime = *server->GetRuntime();
+        auto sender = runtime.AllocateEdgeActor();
+
+        runtime.SetLogPriority(NKikimrServices::TX_DATASHARD, NLog::PRI_TRACE);
+        runtime.SetLogPriority(NKikimrServices::TX_PROXY, NLog::PRI_DEBUG);
+        runtime.SetLogPriority(NKikimrServices::TABLET_EXECUTOR, NLog::PRI_TRACE);
+        runtime.SetLogPriority(NKikimrServices::TABLET_SAUSAGECACHE, NLog::PRI_DEBUG);
+
+        InitRoot(server, sender);
+
+        TDisableDataShardLogBatching disableDataShardLogBatching;
+        UNIT_ASSERT_VALUES_EQUAL(
+            KqpSchemeExec(runtime, R"(
+                CREATE TABLE `/Root/table` (key Uint32, value Uint32, PRIMARY KEY (key))
+                WITH (READ_REPLICAS_SETTINGS = "PER_AZ:1");
+            )"),
+            "SUCCESS");
+
+        auto shards = GetTableShards(server, sender, "/Root/table");
+        UNIT_ASSERT_VALUES_EQUAL(shards.size(), 1UL);
+
+        ExecSQL(server, sender, "UPSERT INTO `/Root/table` (key, value) VALUES (1, 11), (2, 22), (3, 33);");
+
+        // Wait for leader to promote the follower read edge (and stop writing to the Sys table)
+        Cerr << "... sleeping after upsert" << Endl;
+        runtime.SimulateSleep(TDuration::Seconds(1));
+
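+        // Force each read result to carry at most one row, so the follower read has
+        // to proceed through TEvReadContinue steps, and block those continuations to
+        // pause the read partway through.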
+        auto modifyReads = runtime.AddObserver<TEvDataShard::TEvRead>(
+            [&](TEvDataShard::TEvRead::TPtr& ev) {
+                ev->Get()->Record.SetMaxRowsInResult(1);
+            });
+        TBlockEvents<TEvDataShard::TEvReadContinue> blockedContinue(runtime);
+
+        auto readFuture = KqpSimpleStaleRoSend(runtime, "SELECT key, value FROM `/Root/table` ORDER BY key", "/Root");
+        runtime.WaitFor("the first TEvReadContinue", [&]{ return blockedContinue.size() >= 1; });
+
+        Cerr << "... splitting table at key 3" << Endl;
+        SetSplitMergePartCountLimit(&runtime, -1);
+        ui64 txId = AsyncSplitTable(server, sender, "/Root/table", shards.at(0), 3);
+        WaitTxNotification(server, sender, txId);
+
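+        // Resume the paused read after the split; it is expected to fail with UNAVAILABLE
+        // now that the table has been split.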
+        blockedContinue.Unblock().Stop();
+        UNIT_ASSERT_VALUES_EQUAL(
+            FormatResult(runtime.WaitFuture(std::move(readFuture))),
+            "ERROR: UNAVAILABLE");
+
+        Cerr << "... reading from the left follower" << Endl;
+        UNIT_ASSERT_VALUES_EQUAL(
+            KqpSimpleStaleRoExec(runtime, "SELECT key, value FROM `/Root/table` WHERE key < 3 ORDER BY key"),
+            "{ items { uint32_value: 1 } items { uint32_value: 11 } }, "
+            "{ items { uint32_value: 2 } items { uint32_value: 22 } }");
+        Cerr << "... reading from the right follower" << Endl;
+        UNIT_ASSERT_VALUES_EQUAL(
+            KqpSimpleStaleRoExec(runtime, "SELECT key, value FROM `/Root/table` WHERE key >= 3 ORDER BY key"),
+            "{ items { uint32_value: 3 } items { uint32_value: 33 } }");
+    }
+
 } // Y_UNIT_TEST_SUITE(DataShardFollowers)

 } // namespace NKikimr