From 43ad74b6fb2eacf78a62144399f86f457da08d8e Mon Sep 17 00:00:00 2001
From: Sergey Belyakov
Date: Thu, 29 Aug 2024 12:38:49 +0300
Subject: [PATCH 01/13] Move blobstorageproxies actor services to different monitoring shard (#8424)

---
 ydb/core/base/counters.cpp                        |  1 +
 ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp | 11 +++++++++--
 ydb/core/mon/async_http_mon.cpp                   |  6 ++++--
 ydb/core/mon/mon.h                                |  1 +
 ydb/core/mon/mon_impl.h                           |  4 +++-
 ydb/core/mon/sync_http_mon.cpp                    |  3 ++-
 ydb/deploy/helm/ydb-prometheus/values.yaml        |  1 +
 7 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/ydb/core/base/counters.cpp b/ydb/core/base/counters.cpp
index 538266429c5a..ddb144838703 100644
--- a/ydb/core/base/counters.cpp
+++ b/ydb/core/base/counters.cpp
@@ -10,6 +10,7 @@ static const THashSet<TString> DATABASE_SERVICES = {{
     TString("compile"),
     TString("coordinator"),
     TString("dsproxy"),
+    TString("dsproxy_mon"),
     TString("dsproxynode"),
     TString("dsproxy_overview"),
     TString("dsproxy_percentile"),
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp
index c082235cc18f..6aba81c8ec94 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_monactor.cpp
@@ -402,8 +402,15 @@ class TBlobStorageGroupProxyMonActor : public TActorBootstrappedRegisterActorPage(proxiesMonPage, path, name, false, TlsActivationContext->ExecutorThread.ActorSystem,
-                SelfId());
+            mon->RegisterActorPage(TMon::TRegisterActorPageFields{
+                .Title = name,
+                .RelPath = path,
+                .ActorSystem = TlsActivationContext->ExecutorThread.ActorSystem,
+                .Index = proxiesMonPage,
+                .PreTag = false,
+                .ActorId = SelfId(),
+                .MonServiceName = "dsproxy_mon"
+            });
         }
 
         Become(&TThis::StateOnline);
diff --git a/ydb/core/mon/async_http_mon.cpp b/ydb/core/mon/async_http_mon.cpp
index 2dec0b669fcd..5542359baefd 100644
--- a/ydb/core/mon/async_http_mon.cpp
+++ b/ydb/core/mon/async_http_mon.cpp
@@ -219,7 +219,8 @@ class THttpMonLegacyActorRequest : public TActorBootstrappedGet()->Request->URL.Before('?'));
         TString status(response->Status);
-        NMonitoring::THistogramPtr ResponseTimeHgram = NKikimr::GetServiceCounters(NKikimr::AppData()->Counters, "utils")
+        NMonitoring::THistogramPtr ResponseTimeHgram = NKikimr::GetServiceCounters(NKikimr::AppData()->Counters,
+            ActorMonPage->MonServiceName)
             ->GetSubgroup("subsystem", "mon")
             ->GetSubgroup("url", url)
             ->GetSubgroup("status", status)
@@ -870,7 +871,8 @@ NMonitoring::IMonPage* TAsyncHttpMon::RegisterActorPage(TRegisterActorPageFields
         fields.ActorSystem,
         fields.ActorId,
         fields.AllowedSIDs ? fields.AllowedSIDs : Config.AllowedSIDs,
-        fields.UseAuth ? Config.Authorizer : TRequestAuthorizer());
+        fields.UseAuth ? Config.Authorizer : TRequestAuthorizer(),
+        fields.MonServiceName);
     if (fields.Index) {
         fields.Index->Register(page);
         if (fields.SortPages) {
diff --git a/ydb/core/mon/mon.h b/ydb/core/mon/mon.h
index 0252e2b061cb..bdf30bc354ba 100644
--- a/ydb/core/mon/mon.h
+++ b/ydb/core/mon/mon.h
@@ -60,6 +60,7 @@ class TMon {
         bool UseAuth = true;
         TVector<TString> AllowedSIDs;
         bool SortPages = true;
+        TString MonServiceName = "utils";
     };
 
     virtual NMonitoring::IMonPage* RegisterActorPage(TRegisterActorPageFields fields) = 0;
diff --git a/ydb/core/mon/mon_impl.h b/ydb/core/mon/mon_impl.h
index 135b95c69ef9..99ea18cba1d7 100644
--- a/ydb/core/mon/mon_impl.h
+++ b/ydb/core/mon/mon_impl.h
@@ -360,7 +360,7 @@ class TActorMonPage: public IMonPage {
 public:
     TActorMonPage(const TString &path, const TString &title, const TString &host, bool preTag,
         TActorSystem *actorSystem, const TActorId &actorId, const TVector<TString> &sids,
-        TMon::TRequestAuthorizer authorizer)
+        TMon::TRequestAuthorizer authorizer, TString monServiceName = "utils")
         : IMonPage(path, title)
         , Host(host)
         , PreTag(preTag)
@@ -368,6 +368,7 @@ class TActorMonPage: public IMonPage {
         , TargetActorId(actorId)
         , AllowedSIDs(sids)
         , Authorizer(std::move(authorizer))
+        , MonServiceName(monServiceName)
     {
     }
 
@@ -405,6 +406,7 @@ class TActorMonPage: public IMonPage {
    TActorId TargetActorId;
    const TVector<TString> AllowedSIDs;
    TMon::TRequestAuthorizer Authorizer;
+   TString MonServiceName;
 };
 
 inline TString GetPageFullPath(const NMonitoring::IMonPage* page) {
diff --git a/ydb/core/mon/sync_http_mon.cpp b/ydb/core/mon/sync_http_mon.cpp
index 822cd7e3389e..4fc334def9d8 100644
--- a/ydb/core/mon/sync_http_mon.cpp
+++ b/ydb/core/mon/sync_http_mon.cpp
@@ -81,7 +81,8 @@ namespace NActors {
             fields.ActorSystem,
             fields.ActorId,
             fields.AllowedSIDs ? fields.AllowedSIDs : Config.AllowedSIDs,
-            fields.UseAuth ? 
Config.Authorizer : TRequestAuthorizer(), + fields.MonServiceName); if (fields.Index) { fields.Index->Register(page); if (fields.SortPages) { diff --git a/ydb/deploy/helm/ydb-prometheus/values.yaml b/ydb/deploy/helm/ydb-prometheus/values.yaml index 1321adf77c97..09239822cccf 100644 --- a/ydb/deploy/helm/ydb-prometheus/values.yaml +++ b/ydb/deploy/helm/ydb-prometheus/values.yaml @@ -102,6 +102,7 @@ ydb: format: prometheus tenant: "{{ .target.path }}" skipNameRelabeling: true + - counter: dsproxy_mon # Cluster which will be monitored clusters: [] From c2e94d8a63fd1c4d290011b3c44c672c57b539f2 Mon Sep 17 00:00:00 2001 From: Alexander Rutkovsky Date: Tue, 9 Jul 2024 21:28:54 +0300 Subject: [PATCH 02/13] Create separate trace id for BlobStorage requests and link them to original traces (#6444) --- ydb/core/base/blobstorage.cpp | 111 ++++++++++++++++++ ydb/core/base/blobstorage.h | 24 ++++ ydb/core/blobstorage/dsproxy/dsproxy.h | 22 +++- .../dsproxy/dsproxy_assimilate.cpp | 2 +- .../blobstorage/dsproxy/dsproxy_block.cpp | 11 +- .../blobstorage/dsproxy/dsproxy_collect.cpp | 2 +- .../blobstorage/dsproxy/dsproxy_discover.cpp | 3 +- .../dsproxy/dsproxy_discover_m3dc.cpp | 3 +- .../dsproxy/dsproxy_discover_m3of4.cpp | 3 +- ydb/core/blobstorage/dsproxy/dsproxy_get.cpp | 11 +- .../dsproxy/dsproxy_indexrestoreget.cpp | 11 +- .../dsproxy/dsproxy_multicollect.cpp | 2 +- .../blobstorage/dsproxy/dsproxy_multiget.cpp | 11 +- .../blobstorage/dsproxy/dsproxy_patch.cpp | 11 +- ydb/core/blobstorage/dsproxy/dsproxy_put.cpp | 13 +- .../blobstorage/dsproxy/dsproxy_range.cpp | 7 +- .../blobstorage/dsproxy/dsproxy_status.cpp | 2 +- ydb/library/actors/wilson/wilson_span.h | 4 + ydb/library/actors/wilson/wilson_trace.h | 4 + 19 files changed, 189 insertions(+), 68 deletions(-) diff --git a/ydb/core/base/blobstorage.cpp b/ydb/core/base/blobstorage.cpp index 59bfb48fa560..b891d7777d1e 100644 --- a/ydb/core/base/blobstorage.cpp +++ b/ydb/core/base/blobstorage.cpp @@ -1,4 +1,6 @@ #include "blobstorage.h" +#include +#include namespace NKikimr { @@ -44,6 +46,12 @@ bool operator<(const TPDiskCategory x, const TPDiskCategory y) { return std::make_tuple(x.Type(), x.Kind()) < std::make_tuple(y.Type(), y.Kind()); } +void TEvBlobStorage::TEvPut::ToSpan(NWilson::TSpan& span) const { + span + .Attribute("Id", Id.ToString()) + .Attribute("PutHandleClass", NKikimrBlobStorage::EPutHandleClass_Name(HandleClass)); +} + std::unique_ptr TEvBlobStorage::TEvPut::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId) { auto res = std::make_unique(status, Id, TStorageStatusFlags(), groupId, 0.0f); @@ -51,6 +59,36 @@ std::unique_ptr TEvBlobStorage::TEvPut::MakeErrorR return res; } +void TEvBlobStorage::TEvGet::ToSpan(NWilson::TSpan& span) const { + i64 totalSize = 0; + for (ui32 i = 0; i < QuerySize; ++i) { + const auto& q = Queries[i]; + if (q.Shift < q.Id.BlobSize()) { + totalSize += Min(q.Id.BlobSize() - q.Shift, q.Size ? 
q.Size : Max()); + } + } + + span + .Attribute("TotalSize", totalSize) + .Attribute("GetHandleClass", NKikimrBlobStorage::EGetHandleClass_Name(GetHandleClass)) + .Attribute("MustRestoreFirst", MustRestoreFirst) + .Attribute("IsIndexOnly", IsIndexOnly); + + if (span.GetTraceId().GetVerbosity() >= TWilson::DsProxyInternals) { + NWilson::TArrayValue queries; + queries.reserve(QuerySize); + for (ui32 i = 0; i < QuerySize; ++i) { + const auto& q = Queries[i]; + queries.emplace_back(NWilson::TKeyValueList{{ + {"Id", q.Id.ToString()}, + {"Shift", q.Shift}, + {"Size", q.Size}, + }}); + } + span.Attribute("Queries", std::move(queries)); + } +} + std::unique_ptr TEvBlobStorage::TEvGet::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId) { auto res = std::make_unique(status, QuerySize, groupId); @@ -67,6 +105,12 @@ std::unique_ptr TEvBlobStorage::TEvGet::MakeErrorR return res; } +void TEvBlobStorage::TEvBlock::ToSpan(NWilson::TSpan& span) const { + span + .Attribute("TabletId", ::ToString(TabletId)) + .Attribute("Generation", Generation); +} + std::unique_ptr TEvBlobStorage::TEvBlock::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId /*groupId*/) { auto res = std::make_unique(status); @@ -74,6 +118,13 @@ std::unique_ptr TEvBlobStorage::TEvBlock::MakeEr return res; } +void TEvBlobStorage::TEvPatch::ToSpan(NWilson::TSpan& span) const { + span + .Attribute("OriginalGroupId", OriginalGroupId) + .Attribute("OriginalId", OriginalId.ToString()) + .Attribute("PatchedId", PatchedId.ToString()); +} + std::unique_ptr TEvBlobStorage::TEvPatch::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId) { auto res = std::make_unique(status, PatchedId, TStorageStatusFlags(), groupId, 0.0f); @@ -81,6 +132,9 @@ std::unique_ptr TEvBlobStorage::TEvPatch::MakeEr return res; } +void TEvBlobStorage::TEvInplacePatch::ToSpan(NWilson::TSpan& /*span*/) const { +} + std::unique_ptr TEvBlobStorage::TEvInplacePatch::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason) { auto res = std::make_unique(status, PatchedId, TStorageStatusFlags(), 0.0f); @@ -88,6 +142,12 @@ std::unique_ptr TEvBlobStorage::TEvInplac return res; } +void TEvBlobStorage::TEvDiscover::ToSpan(NWilson::TSpan& span) const { + span + .Attribute("TabletId", ::ToString(TabletId)) + .Attribute("ReadBody", ReadBody); +} + std::unique_ptr TEvBlobStorage::TEvDiscover::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId/*groupId*/) { auto res = std::make_unique(status, MinGeneration, 0); @@ -95,6 +155,15 @@ std::unique_ptr TEvBlobStorage::TEvDiscover:: return res; } +void TEvBlobStorage::TEvRange::ToSpan(NWilson::TSpan& span) const { + span + .Attribute("TabletId", ::ToString(TabletId)) + .Attribute("From", From.ToString()) + .Attribute("To", To.ToString()) + .Attribute("MustRestoreFirst", MustRestoreFirst) + .Attribute("IsIndexOnly", IsIndexOnly); +} + std::unique_ptr TEvBlobStorage::TEvRange::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId) { auto res = std::make_unique(status, From, To, groupId); @@ -102,6 +171,42 @@ std::unique_ptr TEvBlobStorage::TEvRange::MakeEr return res; } +void TEvBlobStorage::TEvCollectGarbage::ToSpan(NWilson::TSpan& span) const { + span + .Attribute("TabletId", ::ToString(TabletId)) + .Attribute("RecordGeneration", RecordGeneration) + .Attribute("PerGenerationCounter", PerGenerationCounter) + 
.Attribute("Channel", Channel); + + if (Collect) { + span + .Attribute("CollectGeneration", CollectGeneration) + .Attribute("CollectStep", CollectStep); + } + + if (span.GetTraceId().GetVerbosity() >= TWilson::DsProxyInternals) { + auto vector = [&](const auto& name, const auto& v) { + if (v) { + NWilson::TArrayValue items; + items.reserve(v->size()); + for (const TLogoBlobID& id : *v) { + items.emplace_back(id.ToString()); + } + span.Attribute(name, std::move(items)); + } + }; + vector("Keep", Keep); + vector("DoNotKeep", DoNotKeep); + } else { + if (Keep) { + span.Attribute("NumKeep", static_cast(Keep->size())); + } + if (DoNotKeep) { + span.Attribute("NumDoNotKeep", static_cast(DoNotKeep->size())); + } + } +} + std::unique_ptr TEvBlobStorage::TEvCollectGarbage::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId /*groupId*/) { auto res = std::make_unique(status, TabletId, RecordGeneration, PerGenerationCounter, Channel); @@ -109,6 +214,9 @@ std::unique_ptr TEvBlobStorage::TEvColl return res; } +void TEvBlobStorage::TEvStatus::ToSpan(NWilson::TSpan& /*span*/) const +{} + std::unique_ptr TEvBlobStorage::TEvStatus::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId /*groupId*/) { auto res = std::make_unique(status, TStorageStatusFlags()); @@ -116,6 +224,9 @@ std::unique_ptr TEvBlobStorage::TEvStatus::Make return res; } +void TEvBlobStorage::TEvAssimilate::ToSpan(NWilson::TSpan& /*span*/) const +{} + std::unique_ptr TEvBlobStorage::TEvAssimilate::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId/*groupId*/) { return std::make_unique(status, errorReason); diff --git a/ydb/core/base/blobstorage.h b/ydb/core/base/blobstorage.h index 2413eb2c4235..d8ebabbf6608 100644 --- a/ydb/core/base/blobstorage.h +++ b/ydb/core/base/blobstorage.h @@ -25,6 +25,10 @@ #include +namespace NWilson { + class TSpan; +} // NWilson + namespace NKikimr { static constexpr ui32 MaxProtobufSize = 67108000; @@ -993,6 +997,8 @@ struct TEvBlobStorage { return sizeof(*this) + Buffer.GetSize(); } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -1196,6 +1202,8 @@ struct TEvBlobStorage { return sizeof(*this) + QuerySize * sizeof(TQuery); } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); @@ -1349,6 +1357,8 @@ struct TEvBlobStorage { return sizeof(*this); } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -1546,6 +1556,8 @@ struct TEvBlobStorage { return sizeof(*this) + sizeof(TDiff) * DiffCount; } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -1636,6 +1648,8 @@ struct TEvBlobStorage { return sizeof(*this) + sizeof(TDiff) * DiffCount; } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason); }; @@ -1723,6 +1737,8 @@ struct TEvBlobStorage { return sizeof(*this); } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -1828,6 +1844,8 @@ struct 
TEvBlobStorage { return sizeof(*this); } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -2023,6 +2041,8 @@ struct TEvBlobStorage { return sizeof(*this) + ((Keep ? Keep->size() : 0) + (DoNotKeep ? DoNotKeep->size() : 0)) * sizeof(TLogoBlobID); } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -2091,6 +2111,8 @@ struct TEvBlobStorage { return sizeof(*this); } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -2166,6 +2188,8 @@ struct TEvBlobStorage { return sizeof(*this); } + void ToSpan(NWilson::TSpan& span) const; + std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy.h b/ydb/core/blobstorage/dsproxy/dsproxy.h index 55bcc513dcc6..f13f08849d64 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy.h @@ -175,18 +175,20 @@ class TBlobStorageGroupRequestActor : public TActor { return NKikimrServices::TActivity::BS_GROUP_REQUEST; } + template TBlobStorageGroupRequestActor(TIntrusivePtr info, TIntrusivePtr groupQueues, TIntrusivePtr mon, const TActorId& source, ui64 cookie, NKikimrServices::EServiceKikimr logComponent, bool logAccEnabled, TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters, ui32 restartCounter, - NWilson::TSpan&& span, std::shared_ptr executionRelay) + NWilson::TTraceId&& traceId, const char *name, const TEv *event, + std::shared_ptr executionRelay) : TActor(&TThis::InitialStateFunc, TDerived::ActorActivityType()) , Info(std::move(info)) , GroupQueues(std::move(groupQueues)) , Mon(std::move(mon)) , PoolCounters(storagePoolCounters) , LogCtx(logComponent, logAccEnabled) - , Span(std::move(span)) + , ParentSpan(TWilson::BlobStorage, std::move(traceId), name) , RestartCounter(restartCounter) , CostModel(GroupQueues->CostModel) , Source(source) @@ -197,9 +199,16 @@ class TBlobStorageGroupRequestActor : public TActor { , ExecutionRelay(std::move(executionRelay)) { TDerived::ActiveCounter(Mon)->Inc(); - Span - .Attribute("GroupId", Info->GroupID.GetRawId()) - .Attribute("RestartCounter", RestartCounter); + + if (ParentSpan) { + const NWilson::TTraceId& parentTraceId = ParentSpan.GetTraceId(); + Span = NWilson::TSpan(TWilson::BlobStorage, NWilson::TTraceId::NewTraceId(parentTraceId.GetVerbosity(), + parentTraceId.GetTimeToLive()), ParentSpan.GetName()); + ParentSpan.Link(Span.GetTraceId()); + Span.Attribute("GroupId", Info->GroupID.GetRawId()); + Span.Attribute("RestartCounter", RestartCounter); + event->ToSpan(Span); + } Y_ABORT_UNLESS(CostModel); } @@ -561,8 +570,10 @@ class TBlobStorageGroupRequestActor : public TActor { if (term) { if (status == NKikimrProto::OK) { + ParentSpan.EndOk(); Span.EndOk(); } else { + ParentSpan.EndError(errorReason); Span.EndError(std::move(errorReason)); } } @@ -608,6 +619,7 @@ class TBlobStorageGroupRequestActor : public TActor { TIntrusivePtr Mon; TIntrusivePtr PoolCounters; TLogContext LogCtx; + NWilson::TSpan ParentSpan; NWilson::TSpan Span; TStackVec, 16> Responsiveness; TString ErrorReason; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp index 
f9000ce6cc66..5ff4788e5cc3 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp @@ -270,7 +270,7 @@ class TBlobStorageGroupAssimilateRequest : public TBlobStorageGroupRequestActor< NWilson::TTraceId traceId, TInstant now, TIntrusivePtr& storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_ASSIMILATE, false, {}, now, storagePoolCounters, ev->RestartCounter, - NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.Assimilate"), std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.Assimilate", ev, std::move(ev->ExecutionRelay)) , SkipBlocksUpTo(ev->SkipBlocksUpTo) , SkipBarriersUpTo(ev->SkipBarriersUpTo) , SkipBlobsUpTo(ev->SkipBlobsUpTo) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp index e3aecd3b8bbe..8f56ec8b1cb7 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp @@ -135,11 +135,11 @@ class TBlobStorageGroupBlockRequest : public TBlobStorageGroupRequestActor &info, const TIntrusivePtr &state, const TActorId &source, const TIntrusivePtr &mon, TEvBlobStorage::TEvBlock *ev, - ui64 cookie, NWilson::TSpan&& span, TInstant now, + ui64 cookie, NWilson::TTraceId&& traceId, TInstant now, TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_BLOCK, false, {}, now, storagePoolCounters, ev->RestartCounter, - std::move(span), std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.Block", ev, std::move(ev->ExecutionRelay)) , TabletId(ev->TabletId) , Generation(ev->Generation) , Deadline(ev->Deadline) @@ -179,12 +179,7 @@ IActor* CreateBlobStorageGroupBlockRequest(const TIntrusivePtr &state, const TActorId &source, const TIntrusivePtr &mon, TEvBlobStorage::TEvBlock *ev, ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) { - NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.Block"); - if (span) { - span.Attribute("event", ev->ToString()); - } - - return new TBlobStorageGroupBlockRequest(info, state, source, mon, ev, cookie, std::move(span), now, storagePoolCounters); + return new TBlobStorageGroupBlockRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, storagePoolCounters); } } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp index 9a091ae2e91c..4d94f34be55f 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp @@ -147,7 +147,7 @@ class TBlobStorageGroupCollectGarbageRequest : public TBlobStorageGroupRequestAc NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_COLLECT, false, {}, now, storagePoolCounters, ev->RestartCounter, - NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.CollectGarbage"), std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.CollectGarbage", ev, std::move(ev->ExecutionRelay)) , TabletId(ev->TabletId) , RecordGeneration(ev->RecordGeneration) , PerGenerationCounter(ev->PerGenerationCounter) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp index 72624aba6c49..f05d9777a194 100644 --- 
a/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp @@ -884,8 +884,7 @@ class TBlobStorageGroupDiscoverRequest : public TBlobStorageGroupRequestActor &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_DISCOVER, true, {}, now, storagePoolCounters, ev->RestartCounter, - NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.Discover"), - std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.Discover", ev, std::move(ev->ExecutionRelay)) , TabletId(ev->TabletId) , MinGeneration(ev->MinGeneration) , ReadBody(ev->ReadBody) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp index aaf5b8224e7b..5012636cfc65 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp @@ -464,8 +464,7 @@ class TBlobStorageGroupMirror3dcDiscoverRequest : public TBlobStorageGroupReques TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(std::move(info), std::move(state), std::move(mon), source, cookie, NKikimrServices::BS_PROXY_DISCOVER, false, {}, now, storagePoolCounters, ev->RestartCounter, - NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.Discover(mirror-3-dc)"), - std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.Discover", ev, std::move(ev->ExecutionRelay)) , TabletId(ev->TabletId) , MinGeneration(ev->MinGeneration) , StartTime(now) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp index 4f31a1d83311..c71008096b35 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp @@ -36,8 +36,7 @@ class TBlobStorageGroupMirror3of4DiscoverRequest TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(std::move(info), std::move(state), std::move(mon), source, cookie, NKikimrServices::BS_PROXY_DISCOVER, false, {}, now, storagePoolCounters, ev->RestartCounter, - NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.Discover(mirror-3of4)"), - std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.Discover", ev, std::move(ev->ExecutionRelay)) , TabletId(ev->TabletId) , MinGeneration(ev->MinGeneration) , StartTime(now) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp index 5f09fdee38a0..7f029e926edc 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp @@ -395,11 +395,11 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor &info, const TIntrusivePtr &state, const TActorId &source, const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, ui64 cookie, - NWilson::TSpan&& span, TNodeLayoutInfoPtr&& nodeLayout, TMaybe latencyQueueKind, + NWilson::TTraceId&& traceId, TNodeLayoutInfoPtr&& nodeLayout, TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_GET, ev->IsVerboseNoDataEnabled || ev->CollectDebugInfo, - latencyQueueKind, now, storagePoolCounters, ev->RestartCounter, std::move(span), + latencyQueueKind, now, storagePoolCounters, ev->RestartCounter, std::move(traceId), "DSProxy.Get", ev, std::move(ev->ExecutionRelay)) , GetImpl(info, state, ev, std::move(nodeLayout), LogCtx.RequestPrefix) , 
Orbit(std::move(ev->Orbit)) @@ -472,12 +472,7 @@ IActor* CreateBlobStorageGroupGetRequest(const TIntrusivePtr latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters) { - NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.Get"); - if (span) { - span.Attribute("event", ev->ToString()); - } - - return new TBlobStorageGroupGetRequest(info, state, source, mon, ev, cookie, std::move(span), + return new TBlobStorageGroupGetRequest(info, state, source, mon, ev, cookie, std::move(traceId), std::move(nodeLayout), latencyQueueKind, now, storagePoolCounters); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp index c86ae72db269..c3ee5c87e921 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp @@ -269,11 +269,11 @@ class TBlobStorageGroupIndexRestoreGetRequest TBlobStorageGroupIndexRestoreGetRequest(const TIntrusivePtr &info, const TIntrusivePtr &state, const TActorId &source, const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, ui64 cookie, - NWilson::TSpan&& span, TMaybe latencyQueueKind, TInstant now, + NWilson::TTraceId&& traceId, TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_INDEXRESTOREGET, false, latencyQueueKind, now, storagePoolCounters, - ev->RestartCounter, std::move(span), std::move(ev->ExecutionRelay)) + ev->RestartCounter, std::move(traceId), "DSProxy.IndexRestoreGet", ev, std::move(ev->ExecutionRelay)) , QuerySize(ev->QuerySize) , Queries(ev->Queries.Release()) , Deadline(ev->Deadline) @@ -399,12 +399,7 @@ IActor* CreateBlobStorageGroupIndexRestoreGetRequest(const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, ui64 cookie, NWilson::TTraceId traceId, TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters) { - NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.IndexRestoreGet"); - if (span) { - span.Attribute("event", ev->ToString()); - } - - return new TBlobStorageGroupIndexRestoreGetRequest(info, state, source, mon, ev, cookie, std::move(span), + return new TBlobStorageGroupIndexRestoreGetRequest(info, state, source, mon, ev, cookie, std::move(traceId), latencyQueueKind, now, storagePoolCounters); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp index 5440f44b30ba..f19a25995253 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp @@ -99,7 +99,7 @@ class TBlobStorageGroupMultiCollectRequest NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_MULTICOLLECT, false, {}, now, storagePoolCounters, 0, - NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.MultiCollect"), std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.MultiCollect", ev, std::move(ev->ExecutionRelay)) , Iterations(ev->PerGenerationCounterStepSize()) , TabletId(ev->TabletId) , RecordGeneration(ev->RecordGeneration) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp index 52483b486312..6ea6814c0da7 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp @@ -98,11 +98,11 
@@ class TBlobStorageGroupMultiGetRequest : public TBlobStorageGroupRequestActor &info, const TIntrusivePtr &state, const TActorId &source, const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, ui64 cookie, - NWilson::TSpan&& span, TMaybe latencyQueueKind, TInstant now, + NWilson::TTraceId&& traceId, TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_MULTIGET, false, latencyQueueKind, now, storagePoolCounters, 0, - std::move(span), std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.MultiGet", ev, std::move(ev->ExecutionRelay)) , QuerySize(ev->QuerySize) , Queries(ev->Queries.Release()) , Deadline(ev->Deadline) @@ -213,12 +213,7 @@ IActor* CreateBlobStorageGroupMultiGetRequest(const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, ui64 cookie, NWilson::TTraceId traceId, TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters) { - NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.MultiGet"); - if (span) { - span.Attribute("event", ev->ToString()); - } - - return new TBlobStorageGroupMultiGetRequest(info, state, source, mon, ev, cookie, std::move(span), + return new TBlobStorageGroupMultiGetRequest(info, state, source, mon, ev, cookie, std::move(traceId), latencyQueueKind, now, storagePoolCounters); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp index fbb545409a89..9590da06a67f 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp @@ -142,12 +142,12 @@ class TBlobStorageGroupPatchRequest : public TBlobStorageGroupRequestActor &info, const TIntrusivePtr &state, const TActorId &source, const TIntrusivePtr &mon, TEvBlobStorage::TEvPatch *ev, - ui64 cookie, NWilson::TSpan&& span, TInstant now, + ui64 cookie, NWilson::TTraceId&& traceId, TInstant now, TIntrusivePtr &storagePoolCounters, bool useVPatch = false) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_PATCH, false, {}, now, storagePoolCounters, - ev->RestartCounter, std::move(span), std::move(ev->ExecutionRelay)) + ev->RestartCounter, std::move(traceId), "DSProxy.Patch", ev, std::move(ev->ExecutionRelay)) , OriginalGroupId(TGroupId::FromValue(ev->OriginalGroupId)) , OriginalId(ev->OriginalId) , PatchedId(ev->PatchedId) @@ -1051,12 +1051,7 @@ IActor* CreateBlobStorageGroupPatchRequest(const TIntrusivePtr &storagePoolCounters, bool useVPatch) { - NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.Patch"); - if (span) { - span.Attribute("event", ev->ToString()); - } - - return new TBlobStorageGroupPatchRequest(info, state, source, mon, ev, cookie, std::move(span), now, + return new TBlobStorageGroupPatchRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, storagePoolCounters, useVPatch); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp index 1fc110a6d17d..52882b19f93e 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp @@ -502,14 +502,14 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor &info, const TIntrusivePtr &state, const TActorId &source, const TIntrusivePtr &mon, TEvBlobStorage::TEvPut *ev, - ui64 cookie, NWilson::TSpan&& span, bool timeStatsEnabled, + ui64 cookie, NWilson::TTraceId&& traceId, bool timeStatsEnabled, 
TDiskResponsivenessTracker::TPerDiskStatsPtr stats, TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters, bool enableRequestMod3x3ForMinLatecy) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_PUT, false, latencyQueueKind, now, storagePoolCounters, - ev->RestartCounter, std::move(span), nullptr) + ev->RestartCounter, std::move(traceId), "DSProxy.Put", ev, nullptr) , PutImpl(info, state, ev, mon, enableRequestMod3x3ForMinLatecy, source, cookie, Span.GetTraceId()) , WaitingVDiskResponseCount(info->GetTotalVDisksNum()) , HandleClass(ev->HandleClass) @@ -549,7 +549,7 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor(nullptr), nullptr) , PutImpl(info, state, events, mon, handleClass, tactic, enableRequestMod3x3ForMinLatecy) , WaitingVDiskResponseCount(info->GetTotalVDisksNum()) , IsManyPuts(true) @@ -760,12 +760,7 @@ IActor* CreateBlobStorageGroupPutRequest(const TIntrusivePtr latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters, bool enableRequestMod3x3ForMinLatecy) { - NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.Put"); - if (span) { - span.Attribute("event", ev->ToString()); - } - - return new TBlobStorageGroupPutRequest(info, state, source, mon, ev, cookie, std::move(span), timeStatsEnabled, + return new TBlobStorageGroupPutRequest(info, state, source, mon, ev, cookie, std::move(traceId), timeStatsEnabled, std::move(stats), latencyQueueKind, now, storagePoolCounters, enableRequestMod3x3ForMinLatecy); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp index 246ee8417719..188efb359cf1 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp @@ -342,11 +342,11 @@ class TBlobStorageGroupRangeRequest : public TBlobStorageGroupRequestActor &info, const TIntrusivePtr &state, const TActorId &source, const TIntrusivePtr &mon, TEvBlobStorage::TEvRange *ev, - ui64 cookie, NWilson::TSpan&& span, TInstant now, + ui64 cookie, NWilson::TTraceId&& traceId, TInstant now, TIntrusivePtr &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_RANGE, false, {}, now, storagePoolCounters, - ev->RestartCounter, std::move(span), std::move(ev->ExecutionRelay)) + ev->RestartCounter, std::move(traceId), "DSProxy.Range", ev, std::move(ev->ExecutionRelay)) , TabletId(ev->TabletId) , From(ev->From) , To(ev->To) @@ -407,8 +407,7 @@ IActor* CreateBlobStorageGroupRangeRequest(const TIntrusivePtr &mon, TEvBlobStorage::TEvRange *ev, ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) { - NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.Range"); - return new TBlobStorageGroupRangeRequest(info, state, source, mon, ev, cookie, std::move(span), now, + return new TBlobStorageGroupRangeRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, storagePoolCounters); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp index 293c7db6d3f2..9ecdd12055d0 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp @@ -90,7 +90,7 @@ class TBlobStorageGroupStatusRequest : public TBlobStorageGroupRequestActor &storagePoolCounters) : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, NKikimrServices::BS_PROXY_STATUS, false, {}, now, 
storagePoolCounters, ev->RestartCounter, - NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.Status"), std::move(ev->ExecutionRelay)) + std::move(traceId), "DSProxy.Status", ev, std::move(ev->ExecutionRelay)) , Deadline(ev->Deadline) , Requests(0) , Responses(0) diff --git a/ydb/library/actors/wilson/wilson_span.h b/ydb/library/actors/wilson/wilson_span.h index 20170a896ea7..3dbf906137fd 100644 --- a/ydb/library/actors/wilson/wilson_span.h +++ b/ydb/library/actors/wilson/wilson_span.h @@ -252,6 +252,10 @@ namespace NWilson { return TSpan(verbosity, GetTraceId(), std::move(name), flags, GetActorSystem()); } + TString GetName() const { + return *this ? Data->Span.name() : TString(); + } + static const TSpan Empty; private: diff --git a/ydb/library/actors/wilson/wilson_trace.h b/ydb/library/actors/wilson/wilson_trace.h index 6d63ee87347d..7ab94b5cf943 100644 --- a/ydb/library/actors/wilson/wilson_trace.h +++ b/ydb/library/actors/wilson/wilson_trace.h @@ -204,6 +204,10 @@ namespace NWilson { return Verbosity; } + ui32 GetTimeToLive() const { + return TimeToLive; + } + const void *GetTraceIdPtr() const { return TraceId.data(); } static constexpr size_t GetTraceIdSize() { return sizeof(TTrace); } const void *GetSpanIdPtr() const { return &SpanId; } From 5371a0bc935950eeb1695b65e0d38673e766f1eb Mon Sep 17 00:00:00 2001 From: Sergey Belyakov Date: Mon, 12 Aug 2024 11:40:55 +0300 Subject: [PATCH 03/13] Wrap ctor arguments in structs (#7631) --- ydb/core/blobstorage/dsproxy/dsproxy.h | 300 +++++++++++------ .../dsproxy/dsproxy_assimilate.cpp | 24 +- .../blobstorage/dsproxy/dsproxy_block.cpp | 27 +- .../blobstorage/dsproxy/dsproxy_collect.cpp | 43 +-- .../blobstorage/dsproxy/dsproxy_discover.cpp | 35 +- .../dsproxy/dsproxy_discover_m3dc.cpp | 35 +- .../dsproxy/dsproxy_discover_m3of4.cpp | 35 +- ydb/core/blobstorage/dsproxy/dsproxy_get.cpp | 41 +-- .../dsproxy/dsproxy_indexrestoreget.cpp | 37 +-- .../dsproxy/dsproxy_multicollect.cpp | 45 +-- .../blobstorage/dsproxy/dsproxy_multiget.cpp | 39 +-- .../blobstorage/dsproxy/dsproxy_patch.cpp | 41 +-- ydb/core/blobstorage/dsproxy/dsproxy_put.cpp | 106 ++---- .../blobstorage/dsproxy/dsproxy_range.cpp | 37 +-- .../blobstorage/dsproxy/dsproxy_request.cpp | 309 +++++++++++++++--- .../blobstorage/dsproxy/dsproxy_status.cpp | 18 +- .../dsproxy/ut/dsproxy_env_mock_ut.h | 85 ++++- .../blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp | 21 +- 18 files changed, 747 insertions(+), 531 deletions(-) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy.h b/ydb/core/blobstorage/dsproxy/dsproxy.h index f13f08849d64..840965a3d7aa 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy.h @@ -176,27 +176,47 @@ class TBlobStorageGroupRequestActor : public TActor { } template - TBlobStorageGroupRequestActor(TIntrusivePtr info, TIntrusivePtr groupQueues, - TIntrusivePtr mon, const TActorId& source, ui64 cookie, - NKikimrServices::EServiceKikimr logComponent, bool logAccEnabled, TMaybe latencyQueueKind, - TInstant now, TIntrusivePtr &storagePoolCounters, ui32 restartCounter, - NWilson::TTraceId&& traceId, const char *name, const TEv *event, - std::shared_ptr executionRelay) - : TActor(&TThis::InitialStateFunc, TDerived::ActorActivityType()) - , Info(std::move(info)) - , GroupQueues(std::move(groupQueues)) - , Mon(std::move(mon)) - , PoolCounters(storagePoolCounters) - , LogCtx(logComponent, logAccEnabled) - , ParentSpan(TWilson::BlobStorage, std::move(traceId), name) - , RestartCounter(restartCounter) + struct TCommonParameters { + 
TIntrusivePtr GroupInfo; + TIntrusivePtr GroupQueues; + TIntrusivePtr Mon; + TActorId Source = TActorId{}; + ui64 Cookie = 0; + TInstant Now; + TIntrusivePtr& StoragePoolCounters; + ui32 RestartCounter; + NWilson::TTraceId TraceId = {}; + TEv* Event = nullptr; + std::shared_ptr ExecutionRelay = nullptr; + + bool LogAccEnabled = false; + TMaybe LatencyQueueKind = {}; + }; + + struct TTypeSpecificParameters { + NKikimrServices::EServiceKikimr LogComponent; + const char* Name; + NKikimrServices::TActivity::EType Activity; + }; + +public: + template + TBlobStorageGroupRequestActor(TGroupRequestParameters& params) + : TActor(&TThis::InitialStateFunc, params.TypeSpecific.Activity) + , Info(std::move(params.Common.GroupInfo)) + , GroupQueues(std::move(params.Common.GroupQueues)) + , Mon(std::move(params.Common.Mon)) + , PoolCounters(params.Common.StoragePoolCounters) + , LogCtx(params.TypeSpecific.LogComponent, params.Common.LogAccEnabled) + , ParentSpan(TWilson::BlobStorage, std::move(params.Common.TraceId), params.TypeSpecific.Name) + , RestartCounter(params.Common.RestartCounter) , CostModel(GroupQueues->CostModel) - , Source(source) - , Cookie(cookie) - , LatencyQueueKind(latencyQueueKind) - , RequestStartTime(now) + , Source(params.Common.Source) + , Cookie(params.Common.Cookie) + , LatencyQueueKind(params.Common.LatencyQueueKind) + , RequestStartTime(params.Common.Now) , RacingDomains(&Info->GetTopology()) - , ExecutionRelay(std::move(executionRelay)) + , ExecutionRelay(std::move(params.Common.ExecutionRelay)) { TDerived::ActiveCounter(Mon)->Inc(); @@ -207,7 +227,7 @@ class TBlobStorageGroupRequestActor : public TActor { ParentSpan.Link(Span.GetTraceId()); Span.Attribute("GroupId", Info->GroupID.GetRawId()); Span.Attribute("RestartCounter", RestartCounter); - event->ToSpan(Span); + params.Common.Event->ToSpan(Span); } Y_ABORT_UNLESS(CostModel); @@ -656,92 +676,160 @@ void Decrypt(char *destination, const char *source, size_t shift, size_t sizeByt const TBlobStorageGroupInfo &info); void DecryptInplace(TRope& rope, ui32 offset, ui32 shift, ui32 size, const TLogoBlobID& id, const TBlobStorageGroupInfo& info); -IActor* CreateBlobStorageGroupRangeRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvRange *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupPutRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvPut *ev, - ui64 cookie, NWilson::TTraceId traceId, bool timeStatsEnabled, - TDiskResponsivenessTracker::TPerDiskStatsPtr stats, - TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters, - bool enableRequestMod3x3ForMinLatecy); - -IActor* CreateBlobStorageGroupPutRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, - const TIntrusivePtr &mon, - TBatchedVec &ev, - bool timeStatsEnabled, - TDiskResponsivenessTracker::TPerDiskStatsPtr stats, - TMaybe latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters, - NKikimrBlobStorage::EPutHandleClass handleClass, TEvBlobStorage::TEvPut::ETactic tactic, - bool enableRequestMod3x3ForMinLatecy); - -IActor* CreateBlobStorageGroupGetRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, - ui64 cookie, NWilson::TTraceId traceId, TNodeLayoutInfoPtr&& nodeLayout, - TMaybe 
latencyQueueKind, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupPatchRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvPatch *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters, - bool useVPatch); - -IActor* CreateBlobStorageGroupMultiGetRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, - ui64 cookie, NWilson::TTraceId traceId, TMaybe latencyQueueKind, - TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupIndexRestoreGetRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, - ui64 cookie, NWilson::TTraceId traceId, TMaybe latencyQueueKind, - TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupDiscoverRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupMirror3dcDiscoverRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupMirror3of4DiscoverRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupCollectGarbageRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvCollectGarbage *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupMultiCollectRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvCollectGarbage *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupBlockRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvBlock *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupStatusRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvStatus *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters); - -IActor* CreateBlobStorageGroupAssimilateRequest(const TIntrusivePtr& info, - const TIntrusivePtr& state, const TActorId& source, - const TIntrusivePtr& mon, TEvBlobStorage::TEvAssimilate *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr& storagePoolCounters); +struct TBlobStorageGroupRangeParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_RANGE, + .Name = "DSProxy.Range", + 
.Activity = NKikimrServices::TActivity::BS_GROUP_RANGE + , + }; +}; +IActor* CreateBlobStorageGroupRangeRequest(TBlobStorageGroupRangeParameters params); + +struct TBlobStorageGroupPutParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_PUT, + .Name = "DSProxy.Put", + .Activity = NKikimrServices::TActivity::BS_PROXY_PUT_ACTOR, + }; + bool TimeStatsEnabled; + TDiskResponsivenessTracker::TPerDiskStatsPtr Stats; + bool EnableRequestMod3x3ForMinLatency; +}; +IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupPutParameters params); + +struct TBlobStorageGroupMultiPutParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_PUT, + .Name = "DSProxy.Put", + .Activity = NKikimrServices::TActivity::BS_PROXY_PUT_ACTOR, + }; + + TBatchedVec& Events; + bool TimeStatsEnabled; + TDiskResponsivenessTracker::TPerDiskStatsPtr Stats; + NKikimrBlobStorage::EPutHandleClass HandleClass; + TEvBlobStorage::TEvPut::ETactic Tactic; + bool EnableRequestMod3x3ForMinLatency; + + static ui32 CalculateRestartCounter(TBatchedVec& events) { + ui32 maxRestarts = 0; + for (const auto& ev : events) { + maxRestarts = std::max(maxRestarts, ev->Get()->RestartCounter); + } + return maxRestarts; + } +}; +IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupMultiPutParameters params); + +struct TBlobStorageGroupGetParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_GET, + .Name = "DSProxy.Get", + .Activity = NKikimrServices::TActivity::BS_PROXY_GET_ACTOR, + }; + TNodeLayoutInfoPtr NodeLayout; +}; +IActor* CreateBlobStorageGroupGetRequest(TBlobStorageGroupGetParameters params); + +struct TBlobStorageGroupPatchParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_PATCH, + .Name = "DSProxy.Patch", + .Activity = NKikimrServices::TActivity::BS_PROXY_PATCH_ACTOR, + }; + + bool UseVPatch = false; +}; +IActor* CreateBlobStorageGroupPatchRequest(TBlobStorageGroupPatchParameters params); + +struct TBlobStorageGroupMultiGetParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_MULTIGET, + .Name = "DSProxy.MultiGet", + .Activity = NKikimrServices::TActivity::BS_PROXY_MULTIGET_ACTOR, + }; + bool UseVPatch = false; +}; +IActor* CreateBlobStorageGroupMultiGetRequest(TBlobStorageGroupMultiGetParameters params); + +struct TBlobStorageGroupRestoreGetParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_INDEXRESTOREGET, + .Name = "DSProxy.IndexRestoreGet", + .Activity = NKikimrServices::TActivity::BS_PROXY_INDEXRESTOREGET_ACTOR, + }; +}; +IActor* CreateBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters params); + +struct TBlobStorageGroupDiscoverParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = 
NKikimrServices::BS_PROXY_DISCOVER, + .Name = "DSProxy.Discover", + .Activity = NKikimrServices::TActivity::BS_GROUP_DISCOVER, + }; +}; +IActor* CreateBlobStorageGroupDiscoverRequest(TBlobStorageGroupDiscoverParameters params); +IActor* CreateBlobStorageGroupMirror3dcDiscoverRequest(TBlobStorageGroupDiscoverParameters params); +IActor* CreateBlobStorageGroupMirror3of4DiscoverRequest(TBlobStorageGroupDiscoverParameters params); + +struct TBlobStorageGroupCollectGarbageParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_COLLECT, + .Name = "DSProxy.CollectGarbage", + .Activity = NKikimrServices::TActivity::BS_GROUP_COLLECT_GARBAGE, + }; +}; +IActor* CreateBlobStorageGroupCollectGarbageRequest(TBlobStorageGroupCollectGarbageParameters params); + +struct TBlobStorageGroupMultiCollectParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_MULTICOLLECT, + .Name = "DSProxy.MultiCollect", + .Activity = NKikimrServices::TActivity::BS_PROXY_MULTICOLLECT_ACTOR, + }; +}; +IActor* CreateBlobStorageGroupMultiCollectRequest(TBlobStorageGroupMultiCollectParameters params); + +struct TBlobStorageGroupBlockParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_BLOCK, + .Name = "DSProxy.Block", + .Activity = NKikimrServices::TActivity::BS_GROUP_BLOCK, + }; +}; +IActor* CreateBlobStorageGroupBlockRequest(TBlobStorageGroupBlockParameters params); + +struct TBlobStorageGroupStatusParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_STATUS, + .Name = "DSProxy.Status", + .Activity = NKikimrServices::TActivity::BS_PROXY_STATUS_ACTOR, + }; +}; +IActor* CreateBlobStorageGroupStatusRequest(TBlobStorageGroupStatusParameters params); + +struct TBlobStorageGroupAssimilateParameters { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + .LogComponent = NKikimrServices::BS_PROXY_ASSIMILATE, + .Name = "DSProxy.Assimilate", + .Activity = NKikimrServices::TActivity::BS_GROUP_ASSIMILATE, + }; +}; +IActor* CreateBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParameters params); IActor* CreateBlobStorageGroupEjectedProxy(ui32 groupId, TIntrusivePtr &nodeMon); diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp index 5ff4788e5cc3..863ee6c01c34 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp @@ -264,17 +264,12 @@ class TBlobStorageGroupAssimilateRequest : public TBlobStorageGroupRequestActor< return mon->ActiveAssimilate; } - TBlobStorageGroupAssimilateRequest(const TIntrusivePtr& info, - const TIntrusivePtr& state, const TActorId& source, - const TIntrusivePtr& mon, TEvBlobStorage::TEvAssimilate *ev, ui64 cookie, - NWilson::TTraceId traceId, TInstant now, TIntrusivePtr& storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_ASSIMILATE, false, {}, now, storagePoolCounters, ev->RestartCounter, - 
std::move(traceId), "DSProxy.Assimilate", ev, std::move(ev->ExecutionRelay)) - , SkipBlocksUpTo(ev->SkipBlocksUpTo) - , SkipBarriersUpTo(ev->SkipBarriersUpTo) - , SkipBlobsUpTo(ev->SkipBlobsUpTo) - , PerVDiskInfo(info->GetTotalVDisksNum()) + TBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParameters& params) + : TBlobStorageGroupRequestActor(params) + , SkipBlocksUpTo(params.Common.Event->SkipBlocksUpTo) + , SkipBarriersUpTo(params.Common.Event->SkipBarriersUpTo) + , SkipBlobsUpTo(params.Common.Event->SkipBlobsUpTo) + , PerVDiskInfo(Info->GetTotalVDisksNum()) , Result(new TEvBlobStorage::TEvAssimilateResult(NKikimrProto::OK, {})) { Heap.reserve(PerVDiskInfo.size()); @@ -464,11 +459,8 @@ class TBlobStorageGroupAssimilateRequest : public TBlobStorageGroupRequestActor< } }; -IActor* CreateBlobStorageGroupAssimilateRequest(const TIntrusivePtr& info, - const TIntrusivePtr& state, const TActorId& source, - const TIntrusivePtr& mon, TEvBlobStorage::TEvAssimilate *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr& storagePoolCounters) { - return new TBlobStorageGroupAssimilateRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, storagePoolCounters); +IActor* CreateBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParameters params) { + return new TBlobStorageGroupAssimilateRequest(params); } } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp index 8f56ec8b1cb7..5f93958da094 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp @@ -132,19 +132,13 @@ class TBlobStorageGroupBlockRequest : public TBlobStorageGroupRequestActorActiveBlock; } - TBlobStorageGroupBlockRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvBlock *ev, - ui64 cookie, NWilson::TTraceId&& traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_BLOCK, false, {}, now, storagePoolCounters, ev->RestartCounter, - std::move(traceId), "DSProxy.Block", ev, std::move(ev->ExecutionRelay)) - , TabletId(ev->TabletId) - , Generation(ev->Generation) - , Deadline(ev->Deadline) - , IssuerGuid(ev->IssuerGuid) - , StartTime(now) + TBlobStorageGroupBlockRequest(TBlobStorageGroupBlockParameters& params) + : TBlobStorageGroupRequestActor(params) + , TabletId(params.Common.Event->TabletId) + , Generation(params.Common.Event->Generation) + , Deadline(params.Common.Event->Deadline) + , IssuerGuid(params.Common.Event->IssuerGuid) + , StartTime(params.Common.Now) , QuorumTracker(Info.Get()) {} @@ -175,11 +169,8 @@ class TBlobStorageGroupBlockRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvBlock *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupBlockRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, storagePoolCounters); +IActor* CreateBlobStorageGroupBlockRequest(TBlobStorageGroupBlockParameters params) { + return new TBlobStorageGroupBlockRequest(params); } } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp index 4d94f34be55f..b829103e2345 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp +++ 
b/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp @@ -141,27 +141,22 @@ class TBlobStorageGroupCollectGarbageRequest : public TBlobStorageGroupRequestAc return mon->ActiveCollectGarbage; } - TBlobStorageGroupCollectGarbageRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvCollectGarbage *ev, ui64 cookie, - NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_COLLECT, false, {}, now, storagePoolCounters, ev->RestartCounter, - std::move(traceId), "DSProxy.CollectGarbage", ev, std::move(ev->ExecutionRelay)) - , TabletId(ev->TabletId) - , RecordGeneration(ev->RecordGeneration) - , PerGenerationCounter(ev->PerGenerationCounter) - , Channel(ev->Channel) - , Deadline(ev->Deadline) - , Keep(ev->Keep.Release()) - , DoNotKeep(ev->DoNotKeep.Release()) - , CollectGeneration(ev->CollectGeneration) - , CollectStep(ev->CollectStep) - , Hard(ev->Hard) - , Collect(ev->Collect) - , Decommission(ev->Decommission) + TBlobStorageGroupCollectGarbageRequest(TBlobStorageGroupCollectGarbageParameters& params) + : TBlobStorageGroupRequestActor(params) + , TabletId(params.Common.Event->TabletId) + , RecordGeneration(params.Common.Event->RecordGeneration) + , PerGenerationCounter(params.Common.Event->PerGenerationCounter) + , Channel(params.Common.Event->Channel) + , Deadline(params.Common.Event->Deadline) + , Keep(params.Common.Event->Keep.Release()) + , DoNotKeep(params.Common.Event->DoNotKeep.Release()) + , CollectGeneration(params.Common.Event->CollectGeneration) + , CollectStep(params.Common.Event->CollectStep) + , Hard(params.Common.Event->Hard) + , Collect(params.Common.Event->Collect) + , Decommission(params.Common.Event->Decommission) , QuorumTracker(Info.Get()) - , StartTime(now) + , StartTime(params.Common.Now) {} void Bootstrap() { @@ -205,12 +200,8 @@ class TBlobStorageGroupCollectGarbageRequest : public TBlobStorageGroupRequestAc } }; -IActor* CreateBlobStorageGroupCollectGarbageRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvCollectGarbage *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupCollectGarbageRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, - storagePoolCounters); +IActor* CreateBlobStorageGroupCollectGarbageRequest(TBlobStorageGroupCollectGarbageParameters params) { + return new TBlobStorageGroupCollectGarbageRequest(params); } } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp index f05d9777a194..deeaf7a5b65a 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp @@ -877,24 +877,18 @@ class TBlobStorageGroupDiscoverRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_DISCOVER, true, {}, now, storagePoolCounters, ev->RestartCounter, - std::move(traceId), "DSProxy.Discover", ev, std::move(ev->ExecutionRelay)) - , TabletId(ev->TabletId) - , 
MinGeneration(ev->MinGeneration) - , ReadBody(ev->ReadBody) - , DiscoverBlockedGeneration(ev->DiscoverBlockedGeneration) - , Deadline(ev->Deadline) - , StartTime(now) + TBlobStorageGroupDiscoverRequest(TBlobStorageGroupDiscoverParameters& params) + : TBlobStorageGroupRequestActor(params) + , TabletId(params.Common.Event->TabletId) + , MinGeneration(params.Common.Event->MinGeneration) + , ReadBody(params.Common.Event->ReadBody) + , DiscoverBlockedGeneration(params.Common.Event->DiscoverBlockedGeneration) + , Deadline(params.Common.Event->Deadline) + , StartTime(params.Common.Now) , GroupResponseTracker(Info) , IsGetBlockDone(!DiscoverBlockedGeneration) - , ForceBlockedGeneration(ev->ForceBlockedGeneration) - , FromLeader(ev->FromLeader) + , ForceBlockedGeneration(params.Common.Event->ForceBlockedGeneration) + , FromLeader(params.Common.Event->FromLeader) {} void Bootstrap() { @@ -973,13 +967,8 @@ class TBlobStorageGroupDiscoverRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupDiscoverRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, - storagePoolCounters); +IActor* CreateBlobStorageGroupDiscoverRequest(TBlobStorageGroupDiscoverParameters params) { + return new TBlobStorageGroupDiscoverRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp index 5012636cfc65..d6a0cd3db357 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3dc.cpp @@ -457,22 +457,16 @@ class TBlobStorageGroupMirror3dcDiscoverRequest : public TBlobStorageGroupReques return ERequestType::Discover; } - TBlobStorageGroupMirror3dcDiscoverRequest(TIntrusivePtr info, - TIntrusivePtr state, const TActorId& source, - TIntrusivePtr mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(std::move(info), std::move(state), std::move(mon), source, cookie, - NKikimrServices::BS_PROXY_DISCOVER, false, {}, now, storagePoolCounters, ev->RestartCounter, - std::move(traceId), "DSProxy.Discover", ev, std::move(ev->ExecutionRelay)) - , TabletId(ev->TabletId) - , MinGeneration(ev->MinGeneration) - , StartTime(now) - , Deadline(ev->Deadline) - , ReadBody(ev->ReadBody) - , DiscoverBlockedGeneration(ev->DiscoverBlockedGeneration) - , ForceBlockedGeneration(ev->ForceBlockedGeneration) - , FromLeader(ev->FromLeader) + TBlobStorageGroupMirror3dcDiscoverRequest(TBlobStorageGroupDiscoverParameters& params) + : TBlobStorageGroupRequestActor(params) + , TabletId(params.Common.Event->TabletId) + , MinGeneration(params.Common.Event->MinGeneration) + , StartTime(params.Common.Now) + , Deadline(params.Common.Event->Deadline) + , ReadBody(params.Common.Event->ReadBody) + , DiscoverBlockedGeneration(params.Common.Event->DiscoverBlockedGeneration) + , ForceBlockedGeneration(params.Common.Event->ForceBlockedGeneration) + , FromLeader(params.Common.Event->FromLeader) , GetBlockTracker(Info.Get()) {} @@ -737,13 +731,8 @@ class TBlobStorageGroupMirror3dcDiscoverRequest : public TBlobStorageGroupReques } }; -IActor* CreateBlobStorageGroupMirror3dcDiscoverRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId 
&source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupMirror3dcDiscoverRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, - storagePoolCounters); +IActor* CreateBlobStorageGroupMirror3dcDiscoverRequest(TBlobStorageGroupDiscoverParameters params) { + return new TBlobStorageGroupMirror3dcDiscoverRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp index c71008096b35..1e7f8c0877fb 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_discover_m3of4.cpp @@ -29,22 +29,16 @@ class TBlobStorageGroupMirror3of4DiscoverRequest return ERequestType::Discover; } - TBlobStorageGroupMirror3of4DiscoverRequest(TIntrusivePtr info, - TIntrusivePtr state, const TActorId& source, - TIntrusivePtr mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(std::move(info), std::move(state), std::move(mon), source, cookie, - NKikimrServices::BS_PROXY_DISCOVER, false, {}, now, storagePoolCounters, ev->RestartCounter, - std::move(traceId), "DSProxy.Discover", ev, std::move(ev->ExecutionRelay)) - , TabletId(ev->TabletId) - , MinGeneration(ev->MinGeneration) - , StartTime(now) - , Deadline(ev->Deadline) - , ReadBody(ev->ReadBody) - , DiscoverBlockedGeneration(ev->DiscoverBlockedGeneration) - , ForceBlockedGeneration(ev->ForceBlockedGeneration) - , FromLeader(ev->FromLeader) + TBlobStorageGroupMirror3of4DiscoverRequest(TBlobStorageGroupDiscoverParameters& params) + : TBlobStorageGroupRequestActor(params) + , TabletId(params.Common.Event->TabletId) + , MinGeneration(params.Common.Event->MinGeneration) + , StartTime(params.Common.Now) + , Deadline(params.Common.Event->Deadline) + , ReadBody(params.Common.Event->ReadBody) + , DiscoverBlockedGeneration(params.Common.Event->DiscoverBlockedGeneration) + , ForceBlockedGeneration(params.Common.Event->ForceBlockedGeneration) + , FromLeader(params.Common.Event->FromLeader) { for (size_t i = 0; i < DiskState.size(); ++i) { TDiskState& disk = DiskState[i]; @@ -360,13 +354,8 @@ class TBlobStorageGroupMirror3of4DiscoverRequest } }; -IActor* CreateBlobStorageGroupMirror3of4DiscoverRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvDiscover *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupMirror3of4DiscoverRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, - storagePoolCounters); +IActor* CreateBlobStorageGroupMirror3of4DiscoverRequest(TBlobStorageGroupDiscoverParameters params) { + return new TBlobStorageGroupMirror3of4DiscoverRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp index 7f029e926edc..c44a8534b091 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp @@ -388,32 +388,23 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor& mon) { - return mon->ActiveGet; - } - - TBlobStorageGroupGetRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, 
TEvBlobStorage::TEvGet *ev, ui64 cookie, - NWilson::TTraceId&& traceId, TNodeLayoutInfoPtr&& nodeLayout, TMaybe latencyQueueKind, - TInstant now, TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_GET, ev->IsVerboseNoDataEnabled || ev->CollectDebugInfo, - latencyQueueKind, now, storagePoolCounters, ev->RestartCounter, std::move(traceId), "DSProxy.Get", ev, - std::move(ev->ExecutionRelay)) - , GetImpl(info, state, ev, std::move(nodeLayout), LogCtx.RequestPrefix) - , Orbit(std::move(ev->Orbit)) - , Deadline(ev->Deadline) - , StartTime(now) + TBlobStorageGroupGetRequest(TBlobStorageGroupGetParameters& params) + : TBlobStorageGroupRequestActor(params) + , GetImpl(Info, GroupQueues, params.Common.Event, std::move(params.NodeLayout), + LogCtx.RequestPrefix) + , Orbit(std::move(params.Common.Event->Orbit)) + , Deadline(params.Common.Event->Deadline) + , StartTime(params.Common.Now) , StartTimePut(StartTime) - , GroupSize(info->Type.BlobSubgroupSize()) + , GroupSize(Info->Type.BlobSubgroupSize()) , ReportedBytes(0) { ReportBytes(sizeof(*this)); - MaxSaneRequests = ev->QuerySize * info->Type.TotalPartCount() * (1 + info->Type.Handoff()) * 3; + MaxSaneRequests = params.Common.Event->QuerySize * Info->Type.TotalPartCount() * + (1 + Info->Type.Handoff()) * 3; RequestBytes = GetImpl.CountRequestBytes(); - RequestHandleClass = HandleClassToHandleClass(ev->GetHandleClass); + RequestHandleClass = HandleClassToHandleClass(params.Common.Event->GetHandleClass); if (Orbit.HasShuttles()) { RootCauseTrack.IsOn = true; } @@ -466,14 +457,8 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, - ui64 cookie, NWilson::TTraceId traceId, TNodeLayoutInfoPtr&& nodeLayout, - TMaybe latencyQueueKind, TInstant now, - TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupGetRequest(info, state, source, mon, ev, cookie, std::move(traceId), - std::move(nodeLayout), latencyQueueKind, now, storagePoolCounters); +IActor* CreateBlobStorageGroupGetRequest(TBlobStorageGroupGetParameters params) { + return new TBlobStorageGroupGetRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp index c3ee5c87e921..fb858dfee97c 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp @@ -266,23 +266,17 @@ class TBlobStorageGroupIndexRestoreGetRequest return ERequestType::Get; } - TBlobStorageGroupIndexRestoreGetRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, ui64 cookie, - NWilson::TTraceId&& traceId, TMaybe latencyQueueKind, TInstant now, - TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_INDEXRESTOREGET, false, latencyQueueKind, now, storagePoolCounters, - ev->RestartCounter, std::move(traceId), "DSProxy.IndexRestoreGet", ev, std::move(ev->ExecutionRelay)) - , QuerySize(ev->QuerySize) - , Queries(ev->Queries.Release()) - , Deadline(ev->Deadline) - , IsInternal(ev->IsInternal) - , Decommission(ev->Decommission) - , ForceBlockTabletData(ev->ForceBlockTabletData) + TBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters& params) + : 
TBlobStorageGroupRequestActor(params) + , QuerySize(params.Common.Event->QuerySize) + , Queries(params.Common.Event->Queries.Release()) + , Deadline(params.Common.Event->Deadline) + , IsInternal(params.Common.Event->IsInternal) + , Decommission(params.Common.Event->Decommission) + , ForceBlockTabletData(params.Common.Event->ForceBlockTabletData) , VGetsInFlight(0) - , StartTime(now) - , GetHandleClass(ev->GetHandleClass) + , StartTime(params.Common.Now) + , GetHandleClass(params.Common.Event->GetHandleClass) , RestoreQueriesStarted(0) , RestoreQueriesFinished(0) { @@ -298,7 +292,7 @@ class TBlobStorageGroupIndexRestoreGetRequest } // phantom checks are for non-index queries only - Y_ABORT_UNLESS(!ev->PhantomCheck); + Y_ABORT_UNLESS(!params.Common.Event->PhantomCheck); } void Bootstrap() { @@ -394,13 +388,8 @@ class TBlobStorageGroupIndexRestoreGetRequest } }; -IActor* CreateBlobStorageGroupIndexRestoreGetRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, - ui64 cookie, NWilson::TTraceId traceId, TMaybe latencyQueueKind, TInstant now, - TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupIndexRestoreGetRequest(info, state, source, mon, ev, cookie, std::move(traceId), - latencyQueueKind, now, storagePoolCounters); +IActor* CreateBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters params) { + return new TBlobStorageGroupIndexRestoreGetRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp index f19a25995253..efa0e98077bb 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp @@ -93,29 +93,24 @@ class TBlobStorageGroupMultiCollectRequest return mon->ActiveMultiCollect; } - TBlobStorageGroupMultiCollectRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvCollectGarbage *ev, ui64 cookie, - NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_MULTICOLLECT, false, {}, now, storagePoolCounters, 0, - std::move(traceId), "DSProxy.MultiCollect", ev, std::move(ev->ExecutionRelay)) - , Iterations(ev->PerGenerationCounterStepSize()) - , TabletId(ev->TabletId) - , RecordGeneration(ev->RecordGeneration) - , PerGenerationCounter(ev->PerGenerationCounter) - , Channel(ev->Channel) - , Keep(ev->Keep.Release()) - , DoNotKeep(ev->DoNotKeep.Release()) - , Deadline(ev->Deadline) - , CollectGeneration(ev->CollectGeneration) - , CollectStep(ev->CollectStep) - , Hard(ev->Hard) - , Collect(ev->Collect) - , Decommission(ev->Decommission) + TBlobStorageGroupMultiCollectRequest(TBlobStorageGroupMultiCollectParameters& params) + : TBlobStorageGroupRequestActor(params) + , Iterations(params.Common.Event->PerGenerationCounterStepSize()) + , TabletId(params.Common.Event->TabletId) + , RecordGeneration(params.Common.Event->RecordGeneration) + , PerGenerationCounter(params.Common.Event->PerGenerationCounter) + , Channel(params.Common.Event->Channel) + , Keep(params.Common.Event->Keep.Release()) + , DoNotKeep(params.Common.Event->DoNotKeep.Release()) + , Deadline(params.Common.Event->Deadline) + , CollectGeneration(params.Common.Event->CollectGeneration) + , CollectStep(params.Common.Event->CollectStep) + , 
Hard(params.Common.Event->Hard) + , Collect(params.Common.Event->Collect) + , Decommission(params.Common.Event->Decommission) , FlagRequestsInFlight(0) , CollectRequestsInFlight(0) - , StartTime(now) + , StartTime(params.Common.Now) { Y_ABORT_UNLESS(Iterations > 1); } @@ -214,12 +209,8 @@ class TBlobStorageGroupMultiCollectRequest } }; -IActor* CreateBlobStorageGroupMultiCollectRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvCollectGarbage *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupMultiCollectRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, - storagePoolCounters); +IActor* CreateBlobStorageGroupMultiCollectRequest(TBlobStorageGroupMultiCollectParameters params) { + return new TBlobStorageGroupMultiCollectRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp index 6ea6814c0da7..31f2deea55e8 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp @@ -95,25 +95,19 @@ class TBlobStorageGroupMultiGetRequest : public TBlobStorageGroupRequestActorActiveMultiGet; } - TBlobStorageGroupMultiGetRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, ui64 cookie, - NWilson::TTraceId&& traceId, TMaybe latencyQueueKind, TInstant now, - TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_MULTIGET, false, latencyQueueKind, now, storagePoolCounters, 0, - std::move(traceId), "DSProxy.MultiGet", ev, std::move(ev->ExecutionRelay)) - , QuerySize(ev->QuerySize) - , Queries(ev->Queries.Release()) - , Deadline(ev->Deadline) - , IsInternal(ev->IsInternal) - , PhantomCheck(ev->PhantomCheck) - , Decommission(ev->Decommission) + TBlobStorageGroupMultiGetRequest(TBlobStorageGroupMultiGetParameters& params) + : TBlobStorageGroupRequestActor(params) + , QuerySize(params.Common.Event->QuerySize) + , Queries(params.Common.Event->Queries.Release()) + , Deadline(params.Common.Event->Deadline) + , IsInternal(params.Common.Event->IsInternal) + , PhantomCheck(params.Common.Event->PhantomCheck) + , Decommission(params.Common.Event->Decommission) , Responses(new TEvBlobStorage::TEvGetResult::TResponse[QuerySize]) - , StartTime(now) - , MustRestoreFirst(ev->MustRestoreFirst) - , GetHandleClass(ev->GetHandleClass) - , ForceBlockTabletData(ev->ForceBlockTabletData) + , StartTime(params.Common.Now) + , MustRestoreFirst(params.Common.Event->MustRestoreFirst) + , GetHandleClass(params.Common.Event->GetHandleClass) + , ForceBlockTabletData(params.Common.Event->ForceBlockTabletData) {} void PrepareRequest(ui32 beginIdx, ui32 endIdx) { @@ -208,13 +202,8 @@ class TBlobStorageGroupMultiGetRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvGet *ev, - ui64 cookie, NWilson::TTraceId traceId, TMaybe latencyQueueKind, - TInstant now, TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupMultiGetRequest(info, state, source, mon, ev, cookie, std::move(traceId), - latencyQueueKind, now, storagePoolCounters); +IActor* CreateBlobStorageGroupMultiGetRequest(TBlobStorageGroupMultiGetParameters params) { + return new 
TBlobStorageGroupMultiGetRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp index 9590da06a67f..6c6b54003113 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp @@ -139,25 +139,18 @@ class TBlobStorageGroupPatchRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvPatch *ev, - ui64 cookie, NWilson::TTraceId&& traceId, TInstant now, - TIntrusivePtr &storagePoolCounters, - bool useVPatch = false) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_PATCH, false, {}, now, storagePoolCounters, - ev->RestartCounter, std::move(traceId), "DSProxy.Patch", ev, std::move(ev->ExecutionRelay)) - , OriginalGroupId(TGroupId::FromValue(ev->OriginalGroupId)) - , OriginalId(ev->OriginalId) - , PatchedId(ev->PatchedId) - , MaskForCookieBruteForcing(ev->MaskForCookieBruteForcing) - , DiffCount(ev->DiffCount) - , Diffs(ev->Diffs.Release()) - , StartTime(now) - , Deadline(ev->Deadline) - , Orbit(std::move(ev->Orbit)) - , UseVPatch(useVPatch) + TBlobStorageGroupPatchRequest(TBlobStorageGroupPatchParameters& params) + : TBlobStorageGroupRequestActor(params) + , OriginalGroupId(TGroupId::FromValue(params.Common.Event->OriginalGroupId)) + , OriginalId(params.Common.Event->OriginalId) + , PatchedId(params.Common.Event->PatchedId) + , MaskForCookieBruteForcing(params.Common.Event->MaskForCookieBruteForcing) + , DiffCount(params.Common.Event->DiffCount) + , Diffs(params.Common.Event->Diffs.Release()) + , StartTime(params.Common.Now) + , Deadline(params.Common.Event->Deadline) + , Orbit(std::move(params.Common.Event->Orbit)) + , UseVPatch(params.UseVPatch) {} void ReplyAndDie(NKikimrProto::EReplyStatus status) { @@ -1045,14 +1038,8 @@ class TBlobStorageGroupPatchRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvPatch *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, - TIntrusivePtr &storagePoolCounters, - bool useVPatch) { - return new TBlobStorageGroupPatchRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, - storagePoolCounters, useVPatch); +IActor* CreateBlobStorageGroupPatchRequest(TBlobStorageGroupPatchParameters params) { + return new TBlobStorageGroupPatchRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp index 52882b19f93e..71442f3cad04 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp @@ -499,71 +499,55 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvPut *ev, - ui64 cookie, NWilson::TTraceId&& traceId, bool timeStatsEnabled, - TDiskResponsivenessTracker::TPerDiskStatsPtr stats, - TMaybe latencyQueueKind, TInstant now, - TIntrusivePtr &storagePoolCounters, - bool enableRequestMod3x3ForMinLatecy) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_PUT, false, latencyQueueKind, now, storagePoolCounters, - ev->RestartCounter, std::move(traceId), "DSProxy.Put", ev, nullptr) - , PutImpl(info, state, ev, mon, enableRequestMod3x3ForMinLatecy, source, cookie, Span.GetTraceId()) - , 
WaitingVDiskResponseCount(info->GetTotalVDisksNum()) - , HandleClass(ev->HandleClass) + TBlobStorageGroupPutRequest(TBlobStorageGroupPutParameters& params) + : TBlobStorageGroupRequestActor(params) + , PutImpl(Info, GroupQueues, params.Common.Event, Mon, + params.EnableRequestMod3x3ForMinLatency, params.Common.Source, + params.Common.Cookie, Span.GetTraceId()) + , WaitingVDiskResponseCount(Info->GetTotalVDisksNum()) + , HandleClass(params.Common.Event->HandleClass) , ReportedBytes(0) - , TimeStatsEnabled(timeStatsEnabled) - , Tactic(ev->Tactic) - , Stats(std::move(stats)) - , IsMultiPutMode(false) - , IncarnationRecords(info->GetTotalVDisksNum()) - , ExpiredVDiskSet(&info->GetTopology()) + , TimeStatsEnabled(params.TimeStatsEnabled) + , Tactic(params.Common.Event->Tactic) + , Stats(std::move(params.Stats)) + , IncarnationRecords(Info->GetTotalVDisksNum()) + , ExpiredVDiskSet(&Info->GetTopology()) { - if (ev->Orbit.HasShuttles()) { + if (params.Common.Event->Orbit.HasShuttles()) { RootCauseTrack.IsOn = true; } ReportBytes(PutImpl.Blobs[0].Buffer.capacity() + sizeof(*this)); - RequestBytes = ev->Buffer.size(); + RequestBytes = params.Common.Event->Buffer.size(); RequestHandleClass = HandleClassToHandleClass(HandleClass); - MaxSaneRequests = info->Type.TotalPartCount() * (1ull + info->Type.Handoff()) * 2; - } - - ui32 MaxRestartCounter(const TBatchedVec& events) { - ui32 res = 0; - for (const auto& ev : events) { - res = Max(res, ev->Get()->RestartCounter); - } - return res; + MaxSaneRequests = Info->Type.TotalPartCount() * (1ull + Info->Type.Handoff()) * 2; } - TBlobStorageGroupPutRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, - const TIntrusivePtr &mon, TBatchedVec &events, - bool timeStatsEnabled, TDiskResponsivenessTracker::TPerDiskStatsPtr stats, - TMaybe latencyQueueKind, TInstant now, - TIntrusivePtr &storagePoolCounters, - NKikimrBlobStorage::EPutHandleClass handleClass, TEvBlobStorage::TEvPut::ETactic tactic, - bool enableRequestMod3x3ForMinLatecy) - : TBlobStorageGroupRequestActor(info, state, mon, TActorId(), 0, - NKikimrServices::BS_PROXY_PUT, false, latencyQueueKind, now, storagePoolCounters, - MaxRestartCounter(events), {}, nullptr, static_cast(nullptr), nullptr) - , PutImpl(info, state, events, mon, handleClass, tactic, enableRequestMod3x3ForMinLatecy) - , WaitingVDiskResponseCount(info->GetTotalVDisksNum()) + TBlobStorageGroupPutRequest(TBlobStorageGroupMultiPutParameters& params) + : TBlobStorageGroupRequestActor(params) + , PutImpl(Info, GroupQueues, params.Events, Mon, params.HandleClass, params.Tactic, + params.EnableRequestMod3x3ForMinLatency) + , WaitingVDiskResponseCount(Info->GetTotalVDisksNum()) , IsManyPuts(true) - , HandleClass(handleClass) + , HandleClass(params.HandleClass) , ReportedBytes(0) +<<<<<<< HEAD , TimeStatsEnabled(timeStatsEnabled) , Tactic(tactic) , Stats(std::move(stats)) , IsMultiPutMode(true) , IncarnationRecords(info->GetTotalVDisksNum()) , ExpiredVDiskSet(&info->GetTopology()) +======= + , TimeStatsEnabled(params.TimeStatsEnabled) + , Tactic(params.Tactic) + , Stats(std::move(params.Stats)) + , IncarnationRecords(Info->GetTotalVDisksNum()) + , ExpiredVDiskSet(&Info->GetTopology()) +>>>>>>> 4efd4715e9... 
Wrap ctor arguments in structs (#7631) { - Y_DEBUG_ABORT_UNLESS(events.size() <= MaxBatchedPutRequests); - for (auto &ev : events) { + Y_DEBUG_ABORT_UNLESS(params.Events.size() <= MaxBatchedPutRequests); + for (auto &ev : params.Events) { auto& msg = *ev->Get(); if (msg.Orbit.HasShuttles()) { RootCauseTrack.IsOn = true; @@ -577,7 +561,7 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActorType.TotalPartCount() * (1ull + info->Type.Handoff()) * 2; + MaxSaneRequests = Info->Type.TotalPartCount() * (1ull + Info->Type.Handoff()) * 2; } void ReportBytes(i64 bytes) { @@ -752,30 +736,12 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvPut *ev, - ui64 cookie, NWilson::TTraceId traceId, bool timeStatsEnabled, - TDiskResponsivenessTracker::TPerDiskStatsPtr stats, - TMaybe latencyQueueKind, TInstant now, - TIntrusivePtr &storagePoolCounters, - bool enableRequestMod3x3ForMinLatecy) { - return new TBlobStorageGroupPutRequest(info, state, source, mon, ev, cookie, std::move(traceId), timeStatsEnabled, - std::move(stats), latencyQueueKind, now, storagePoolCounters, enableRequestMod3x3ForMinLatecy); +IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupPutParameters params) { + return new TBlobStorageGroupPutRequest(params); } -IActor* CreateBlobStorageGroupPutRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, - const TIntrusivePtr &mon, TBatchedVec &ev, - bool timeStatsEnabled, - TDiskResponsivenessTracker::TPerDiskStatsPtr stats, - TMaybe latencyQueueKind, TInstant now, - TIntrusivePtr &storagePoolCounters, - NKikimrBlobStorage::EPutHandleClass handleClass, TEvBlobStorage::TEvPut::ETactic tactic, - bool enableRequestMod3x3ForMinLatecy) { - return new TBlobStorageGroupPutRequest(info, state, mon, ev, timeStatsEnabled, - std::move(stats), latencyQueueKind, now, storagePoolCounters, handleClass, tactic, - enableRequestMod3x3ForMinLatecy); +IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupMultiPutParameters params) { + return new TBlobStorageGroupPutRequest(params); } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp index 188efb359cf1..3132b366b051 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp @@ -339,23 +339,17 @@ class TBlobStorageGroupRangeRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvRange *ev, - ui64 cookie, NWilson::TTraceId&& traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_RANGE, false, {}, now, storagePoolCounters, - ev->RestartCounter, std::move(traceId), "DSProxy.Range", ev, std::move(ev->ExecutionRelay)) - , TabletId(ev->TabletId) - , From(ev->From) - , To(ev->To) - , Deadline(ev->Deadline) - , MustRestoreFirst(ev->MustRestoreFirst) - , IsIndexOnly(ev->IsIndexOnly) - , ForceBlockedGeneration(ev->ForceBlockedGeneration) - , Decommission(ev->Decommission) - , StartTime(now) + TBlobStorageGroupRangeRequest(TBlobStorageGroupRangeParameters& params) + : TBlobStorageGroupRequestActor(params) + , TabletId(params.Common.Event->TabletId) + , From(params.Common.Event->From) + , To(params.Common.Event->To) + , Deadline(params.Common.Event->Deadline) + , 
MustRestoreFirst(params.Common.Event->MustRestoreFirst) + , IsIndexOnly(params.Common.Event->IsIndexOnly) + , ForceBlockedGeneration(params.Common.Event->ForceBlockedGeneration) + , Decommission(params.Common.Event->Decommission) + , StartTime(params.Common.Now) , FailedDisks(&Info->GetTopology()) {} @@ -402,13 +396,8 @@ class TBlobStorageGroupRangeRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvRange *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, - TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupRangeRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, - storagePoolCounters); +IActor* CreateBlobStorageGroupRangeRequest(TBlobStorageGroupRangeParameters params) { + return new TBlobStorageGroupRangeRequest(params); } };//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp index fd8b1c52dc93..746f86b97949 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp @@ -44,9 +44,24 @@ namespace NKikimr { EnableWilsonTracing(ev, Mon->GetSamplePPM); if (ev->Get()->IsIndexOnly) { Mon->EventIndexRestoreGet->Inc(); - PushRequest(CreateBlobStorageGroupIndexRestoreGetRequest(Info, Sessions->GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), {}, TActivationContext::Now(), StoragePoolCounters), - ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupIndexRestoreGetRequest( + TBlobStorageGroupRestoreGetParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay, + } + }), + ev->Get()->Deadline + ); } else { TLogoBlobID lastBlobId; const ui32 querySize = ev->Get()->QuerySize; @@ -78,14 +93,47 @@ namespace NKikimr { if (differentBlobCount == 1 || isSmall) { Mon->EventGet->Inc(); - PushRequest(CreateBlobStorageGroupGetRequest(Info, Sessions->GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), TNodeLayoutInfoPtr(NodeLayoutInfo), - kind, TActivationContext::Now(), StoragePoolCounters), ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupGetRequest( + TBlobStorageGroupGetParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay, + .LatencyQueueKind = kind, + }, + .NodeLayout = TNodeLayoutInfoPtr(NodeLayoutInfo) + }), + ev->Get()->Deadline + ); } else { Mon->EventMultiGet->Inc(); - PushRequest(CreateBlobStorageGroupMultiGetRequest(Info, Sessions->GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), kind, TActivationContext::Now(), StoragePoolCounters), - ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupMultiGetRequest( + TBlobStorageGroupMultiGetParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now 
= TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay, + .LatencyQueueKind = kind, + }, + }), + ev->Get()->Deadline + ); } } } @@ -155,19 +203,52 @@ namespace NKikimr { TAppData *app = NKikimr::AppData(TActivationContext::AsActorContext()); bool enableRequestMod3x3ForMinLatency = app->FeatureFlags.GetEnable3x3RequestsForMirror3DCMinLatencyPut(); // TODO(alexvru): MinLatency support - PushRequest(CreateBlobStorageGroupPutRequest(Info, Sessions->GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), Mon->TimeStats.IsEnabled(), - PerDiskStats, kind, TActivationContext::Now(), StoragePoolCounters, - enableRequestMod3x3ForMinLatency), ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupPutRequest( + TBlobStorageGroupPutParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay, + .LatencyQueueKind = kind + }, + .TimeStatsEnabled = Mon->TimeStats.IsEnabled(), + .Stats = PerDiskStats, + .EnableRequestMod3x3ForMinLatency = enableRequestMod3x3ForMinLatency, + }), + ev->Get()->Deadline + ); } } void TBlobStorageGroupProxy::HandleNormal(TEvBlobStorage::TEvBlock::TPtr &ev) { EnsureMonitoring(ev->Get()->IsMonitored); Mon->EventBlock->Inc(); - PushRequest(CreateBlobStorageGroupBlockRequest(Info, Sessions->GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), TActivationContext::Now(), StoragePoolCounters), - ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupBlockRequest( + TBlobStorageGroupBlockParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + } + }), + ev->Get()->Deadline + ); } void TBlobStorageGroupProxy::HandleNormal(TEvBlobStorage::TEvPatch::TPtr &ev) { @@ -179,9 +260,25 @@ namespace NKikimr { EnsureMonitoring(true); Mon->EventPatch->Inc(); TInstant now = TActivationContext::Now(); - PushRequest(CreateBlobStorageGroupPatchRequest(Info, Sessions->GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), now, StoragePoolCounters, EnableVPatch.Update(now)), - ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupPatchRequest( + TBlobStorageGroupPatchParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = now, + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + }, + .UseVPatch = static_cast(EnableVPatch.Update(now)) + }), + ev->Get()->Deadline + ); } void TBlobStorageGroupProxy::HandleNormal(TEvBlobStorage::TEvDiscover::TPtr &ev) { @@ -200,8 +297,24 @@ namespace NKikimr { : Info->Type.GetErasure() == TBlobStorageGroupType::ErasureMirror3of4 ? 
CreateBlobStorageGroupMirror3of4DiscoverRequest : CreateBlobStorageGroupDiscoverRequest; - PushRequest(callback(Info, Sessions->GroupQueues, ev->Sender, Mon, ev->Get(), ev->Cookie, std::move(ev->TraceId), - TActivationContext::Now(), StoragePoolCounters), ev->Get()->Deadline); + PushRequest(callback( + TBlobStorageGroupDiscoverParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + } + }), + ev->Get()->Deadline + ); } void TBlobStorageGroupProxy::HandleNormal(TEvBlobStorage::TEvRange::TPtr &ev) { @@ -212,9 +325,24 @@ namespace NKikimr { } EnsureMonitoring(true); Mon->EventRange->Inc(); - PushRequest(CreateBlobStorageGroupRangeRequest(Info, Sessions->GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), TActivationContext::Now(), StoragePoolCounters), - ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupRangeRequest( + TBlobStorageGroupRangeParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + } + }), + ev->Get()->Deadline + ); } void TBlobStorageGroupProxy::HandleNormal(TEvBlobStorage::TEvCollectGarbage::TPtr &ev) { @@ -222,14 +350,44 @@ namespace NKikimr { if (!ev->Get()->IsMultiCollectAllowed || ev->Get()->PerGenerationCounterStepSize() == 1) { Mon->EventCollectGarbage->Inc(); - PushRequest(CreateBlobStorageGroupCollectGarbageRequest(Info, Sessions->GroupQueues, - ev->Sender, Mon, ev->Get(), ev->Cookie, std::move(ev->TraceId), TActivationContext::Now(), - StoragePoolCounters), ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupCollectGarbageRequest( + TBlobStorageGroupCollectGarbageParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + } + }), + ev->Get()->Deadline + ); } else { Mon->EventMultiCollect->Inc(); - PushRequest(CreateBlobStorageGroupMultiCollectRequest(Info, Sessions->GroupQueues, - ev->Sender, Mon, ev->Get(), ev->Cookie, std::move(ev->TraceId), TActivationContext::Now(), - StoragePoolCounters), ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupMultiCollectRequest( + TBlobStorageGroupMultiCollectParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + } + }), + ev->Get()->Deadline + ); } } @@ -241,17 +399,47 @@ namespace NKikimr { } EnsureMonitoring(true); Mon->EventStatus->Inc(); - PushRequest(CreateBlobStorageGroupStatusRequest(Info, 
Sessions->GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), TActivationContext::Now(), StoragePoolCounters), - ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupStatusRequest( + TBlobStorageGroupStatusParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + } + }), + TInstant::Max() + ); } void TBlobStorageGroupProxy::HandleNormal(TEvBlobStorage::TEvAssimilate::TPtr &ev) { EnsureMonitoring(true); Mon->EventAssimilate->Inc(); - PushRequest(CreateBlobStorageGroupAssimilateRequest(Info, Sessions->GroupQueues, ev->Sender, - Mon, ev->Get(), ev->Cookie, std::move(ev->TraceId), TActivationContext::Now(), StoragePoolCounters), - TInstant::Max()); + PushRequest(CreateBlobStorageGroupAssimilateRequest( + TBlobStorageGroupAssimilateParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + } + }), + TInstant::Max() + ); } void TBlobStorageGroupProxy::Handle(TEvDeathNote::TPtr ev) { @@ -285,14 +473,49 @@ namespace NKikimr { // TODO(alexvru): MinLatency support if (batchedPuts.Queue.size() == 1) { auto& ev = batchedPuts.Queue.front(); - PushRequest(CreateBlobStorageGroupPutRequest(Info, Sessions->GroupQueues, ev->Sender, - Mon, ev->Get(), ev->Cookie, std::move(ev->TraceId), Mon->TimeStats.IsEnabled(), PerDiskStats, - kind, TActivationContext::Now(), StoragePoolCounters, enableRequestMod3x3ForMinLatency), - ev->Get()->Deadline); + PushRequest(CreateBlobStorageGroupPutRequest( + TBlobStorageGroupPutParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay, + .LatencyQueueKind = kind, + }, + .TimeStatsEnabled = Mon->TimeStats.IsEnabled(), + .Stats = PerDiskStats, + .EnableRequestMod3x3ForMinLatency = enableRequestMod3x3ForMinLatency, + }), + ev->Get()->Deadline + ); } else { - PushRequest(CreateBlobStorageGroupPutRequest(Info, Sessions->GroupQueues, - Mon, batchedPuts.Queue, Mon->TimeStats.IsEnabled(), PerDiskStats, kind, TActivationContext::Now(), - StoragePoolCounters, handleClass, tactic, enableRequestMod3x3ForMinLatency), TInstant::Max()); + PushRequest(CreateBlobStorageGroupPutRequest( + TBlobStorageGroupMultiPutParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = Sessions->GroupQueues, + .Mon = Mon, + .Now = TActivationContext::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = TBlobStorageGroupMultiPutParameters::CalculateRestartCounter(batchedPuts.Queue), + .LatencyQueueKind = kind, + }, + .Events = batchedPuts.Queue, + .TimeStatsEnabled = Mon->TimeStats.IsEnabled(), + .Stats = PerDiskStats, + .HandleClass = handleClass, + .Tactic = tactic, + 
.EnableRequestMod3x3ForMinLatency = enableRequestMod3x3ForMinLatency, + }), + TInstant::Max() + ); } } else { for (auto it = batchedPuts.Queue.begin(); it != batchedPuts.Queue.end(); ++it) { diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp index 9ecdd12055d0..6bab0545cd51 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp @@ -84,14 +84,9 @@ class TBlobStorageGroupStatusRequest : public TBlobStorageGroupRequestActorActiveStatus; } - TBlobStorageGroupStatusRequest(const TIntrusivePtr &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvStatus *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) - : TBlobStorageGroupRequestActor(info, state, mon, source, cookie, - NKikimrServices::BS_PROXY_STATUS, false, {}, now, storagePoolCounters, ev->RestartCounter, - std::move(traceId), "DSProxy.Status", ev, std::move(ev->ExecutionRelay)) - , Deadline(ev->Deadline) + TBlobStorageGroupStatusRequest(TBlobStorageGroupStatusParameters& params) + : TBlobStorageGroupRequestActor(params) + , Deadline(params.Common.Event->Deadline) , Requests(0) , Responses(0) , QuorumTracker(Info.Get()) @@ -134,11 +129,8 @@ class TBlobStorageGroupStatusRequest : public TBlobStorageGroupRequestActor &info, - const TIntrusivePtr &state, const TActorId &source, - const TIntrusivePtr &mon, TEvBlobStorage::TEvStatus *ev, - ui64 cookie, NWilson::TTraceId traceId, TInstant now, TIntrusivePtr &storagePoolCounters) { - return new TBlobStorageGroupStatusRequest(info, state, source, mon, ev, cookie, std::move(traceId), now, storagePoolCounters); +IActor* CreateBlobStorageGroupStatusRequest(TBlobStorageGroupStatusParameters params) { + return new TBlobStorageGroupStatusRequest(params); } } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h b/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h index c74ea2d3c32e..6ba8e8bffd99 100644 --- a/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h +++ b/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h @@ -103,33 +103,94 @@ struct TDSProxyEnv { std::unique_ptr CreatePutRequestActor(TEvBlobStorage::TEvPut::TPtr &ev) { TMaybe kind = PutHandleClassToGroupStatKind(ev->Get()->HandleClass); - return std::unique_ptr(CreateBlobStorageGroupPutRequest(Info, GroupQueues, ev->Sender, Mon, ev->Get(), - ev->Cookie, std::move(ev->TraceId), Mon->TimeStats.IsEnabled(), PerDiskStatsPtr, kind, - TInstant::Now(), StoragePoolCounters, false)); + return std::unique_ptr(CreateBlobStorageGroupPutRequest( + TBlobStorageGroupPutParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TInstant::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay, + .LatencyQueueKind = kind, + }, + .TimeStatsEnabled = Mon->TimeStats.IsEnabled(), + .Stats = PerDiskStatsPtr, + .EnableRequestMod3x3ForMinLatency = false, + })); } std::unique_ptr CreatePutRequestActor(TBatchedVec &batched, TEvBlobStorage::TEvPut::ETactic tactic, NKikimrBlobStorage::EPutHandleClass handleClass) { TMaybe kind = PutHandleClassToGroupStatKind(handleClass); - return std::unique_ptr(CreateBlobStorageGroupPutRequest(Info, GroupQueues, - Mon, batched, 
Mon->TimeStats.IsEnabled(), PerDiskStatsPtr, kind,TInstant::Now(), - StoragePoolCounters, handleClass, tactic, false)); + return std::unique_ptr(CreateBlobStorageGroupPutRequest( + TBlobStorageGroupMultiPutParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = GroupQueues, + .Mon = Mon, + .Now = TInstant::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = TBlobStorageGroupMultiPutParameters::CalculateRestartCounter(batched), + .LatencyQueueKind = kind, + }, + .Events = batched, + .TimeStatsEnabled = Mon->TimeStats.IsEnabled(), + .Stats = PerDiskStatsPtr, + .HandleClass = handleClass, + .Tactic = tactic, + .EnableRequestMod3x3ForMinLatency = false, + })); } std::unique_ptr CreateGetRequestActor(TEvBlobStorage::TEvGet::TPtr &ev, NKikimrBlobStorage::EPutHandleClass handleClass) { TMaybe kind = PutHandleClassToGroupStatKind(handleClass); - return std::unique_ptr(CreateBlobStorageGroupGetRequest(Info, GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), TNodeLayoutInfoPtr(NodeLayoutInfo), - kind, TInstant::Now(), StoragePoolCounters)); + return std::unique_ptr(CreateBlobStorageGroupGetRequest( + TBlobStorageGroupGetParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TInstant::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay, + .LatencyQueueKind = kind, + }, + .NodeLayout = TNodeLayoutInfoPtr(NodeLayoutInfo) + })); } std::unique_ptr CreatePatchRequestActor(TEvBlobStorage::TEvPatch::TPtr &ev, bool useVPatch = false) { - return std::unique_ptr(CreateBlobStorageGroupPatchRequest(Info, GroupQueues, ev->Sender, Mon, - ev->Get(), ev->Cookie, std::move(ev->TraceId), TInstant::Now(), StoragePoolCounters, - useVPatch)); + return std::unique_ptr(CreateBlobStorageGroupPatchRequest( + TBlobStorageGroupPatchParameters{ + .Common = { + .GroupInfo = Info, + .GroupQueues = GroupQueues, + .Mon = Mon, + .Source = ev->Sender, + .Cookie = ev->Cookie, + .Now = TInstant::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = ev->Get()->RestartCounter, + .TraceId = std::move(ev->TraceId), + .Event = ev->Get(), + .ExecutionRelay = ev->Get()->ExecutionRelay + }, + .UseVPatch = useVPatch + })); } }; diff --git a/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp b/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp index c8e0d01a41b9..79b3072da43a 100644 --- a/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp +++ b/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp @@ -3403,9 +3403,24 @@ class TTestBlobStorageProxyBatchedPutRequestDoesNotContainAHugeBlob : public TTe batched[1] = GetPut(blobIds[1], Data2); TMaybe kind = PutHandleClassToGroupStatKind(HandleClass); - IActor *reqActor = CreateBlobStorageGroupPutRequest(BsInfo, GroupQueues, - Mon, batched, false, PerDiskStatsPtr, kind,TInstant::Now(), - StoragePoolCounters, HandleClass, Tactic, false); + IActor *reqActor = CreateBlobStorageGroupPutRequest( + TBlobStorageGroupMultiPutParameters{ + .Common = { + .GroupInfo = BsInfo, + .GroupQueues = GroupQueues, + .Mon = Mon, + .Now = TInstant::Now(), + .StoragePoolCounters = StoragePoolCounters, + .RestartCounter = TBlobStorageGroupMultiPutParameters::CalculateRestartCounter(batched), + .LatencyQueueKind = kind, + }, + .Events = batched, + .TimeStatsEnabled = false, + .Stats = PerDiskStatsPtr, + 
.HandleClass = HandleClass, + .Tactic = Tactic, + .EnableRequestMod3x3ForMinLatency = false, + }); ctx.Register(reqActor); break; From 3a30ffee66bfe4692ad04ab3002e7fe60f03d961 Mon Sep 17 00:00:00 2001 From: Sergey Belyakov Date: Mon, 12 Aug 2024 17:38:10 +0300 Subject: [PATCH 04/13] Restore LogAccEnabled in Discover and Get requests (#7651) --- ydb/core/blobstorage/dsproxy/dsproxy_request.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp index 746f86b97949..fe85b916350c 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp @@ -107,6 +107,7 @@ namespace NKikimr { .TraceId = std::move(ev->TraceId), .Event = ev->Get(), .ExecutionRelay = ev->Get()->ExecutionRelay, + .LogAccEnabled = ev->Get()->IsVerboseNoDataEnabled || ev->Get()->CollectDebugInfo, .LatencyQueueKind = kind, }, .NodeLayout = TNodeLayoutInfoPtr(NodeLayoutInfo) @@ -292,9 +293,10 @@ namespace NKikimr { EnsureMonitoring(true); Mon->EventDiscover->Inc(); EnableWilsonTracing(ev, Mon->DiscoverSamplePPM); - auto&& callback = Info->Type.GetErasure() == TBlobStorageGroupType::ErasureMirror3dc + TErasureType::EErasureSpecies erasure = Info->Type.GetErasure(); + auto&& callback = erasure == TBlobStorageGroupType::ErasureMirror3dc ? CreateBlobStorageGroupMirror3dcDiscoverRequest - : Info->Type.GetErasure() == TBlobStorageGroupType::ErasureMirror3of4 + : erasure == TBlobStorageGroupType::ErasureMirror3of4 ? CreateBlobStorageGroupMirror3of4DiscoverRequest : CreateBlobStorageGroupDiscoverRequest; PushRequest(callback( @@ -310,7 +312,9 @@ namespace NKikimr { .RestartCounter = ev->Get()->RestartCounter, .TraceId = std::move(ev->TraceId), .Event = ev->Get(), - .ExecutionRelay = ev->Get()->ExecutionRelay + .ExecutionRelay = ev->Get()->ExecutionRelay, + .LogAccEnabled = (erasure != TBlobStorageGroupType::ErasureMirror3dc) && + (erasure != TBlobStorageGroupType::ErasureMirror3of4) } }), ev->Get()->Deadline From 8e1e06bb95643d489a79d76714a477954bbef9da Mon Sep 17 00:00:00 2001 From: Sergey Belyakov Date: Mon, 18 Nov 2024 15:20:25 +0000 Subject: [PATCH 05/13] Correct merge mistakes --- ydb/core/blobstorage/dsproxy/dsproxy.h | 58 ++++++++++---------- ydb/core/blobstorage/dsproxy/dsproxy_get.cpp | 4 ++ ydb/core/blobstorage/dsproxy/dsproxy_put.cpp | 11 +--- 3 files changed, 35 insertions(+), 38 deletions(-) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy.h b/ydb/core/blobstorage/dsproxy/dsproxy.h index 840965a3d7aa..17eb1d8fc938 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy.h @@ -175,7 +175,6 @@ class TBlobStorageGroupRequestActor : public TActor { return NKikimrServices::TActivity::BS_GROUP_REQUEST; } - template struct TCommonParameters { TIntrusivePtr GroupInfo; TIntrusivePtr GroupQueues; @@ -186,7 +185,7 @@ class TBlobStorageGroupRequestActor : public TActor { TIntrusivePtr& StoragePoolCounters; ui32 RestartCounter; NWilson::TTraceId TraceId = {}; - TEv* Event = nullptr; + TDerived* Event = nullptr; std::shared_ptr ExecutionRelay = nullptr; bool LogAccEnabled = false; @@ -202,7 +201,7 @@ class TBlobStorageGroupRequestActor : public TActor { public: template TBlobStorageGroupRequestActor(TGroupRequestParameters& params) - : TActor(&TThis::InitialStateFunc, params.TypeSpecific.Activity) + : TActor(&TThis::InitialStateFunc, params.TypeSpecific.Activity) , Info(std::move(params.Common.GroupInfo)) , 
GroupQueues(std::move(params.Common.GroupQueues)) , Mon(std::move(params.Common.Mon)) @@ -677,8 +676,8 @@ void Decrypt(char *destination, const char *source, size_t shift, size_t sizeByt void DecryptInplace(TRope& rope, ui32 offset, ui32 shift, ui32 size, const TLogoBlobID& id, const TBlobStorageGroupInfo& info); struct TBlobStorageGroupRangeParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_RANGE, .Name = "DSProxy.Range", .Activity = NKikimrServices::TActivity::BS_GROUP_RANGE @@ -688,8 +687,8 @@ struct TBlobStorageGroupRangeParameters { IActor* CreateBlobStorageGroupRangeRequest(TBlobStorageGroupRangeParameters params); struct TBlobStorageGroupPutParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_PUT, .Name = "DSProxy.Put", .Activity = NKikimrServices::TActivity::BS_PROXY_PUT_ACTOR, @@ -701,8 +700,8 @@ struct TBlobStorageGroupPutParameters { IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupPutParameters params); struct TBlobStorageGroupMultiPutParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_PUT, .Name = "DSProxy.Put", .Activity = NKikimrServices::TActivity::BS_PROXY_PUT_ACTOR, @@ -726,8 +725,8 @@ struct TBlobStorageGroupMultiPutParameters { IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupMultiPutParameters params); struct TBlobStorageGroupGetParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_GET, .Name = "DSProxy.Get", .Activity = NKikimrServices::TActivity::BS_PROXY_GET_ACTOR, @@ -737,8 +736,8 @@ struct TBlobStorageGroupGetParameters { IActor* CreateBlobStorageGroupGetRequest(TBlobStorageGroupGetParameters params); struct TBlobStorageGroupPatchParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_PATCH, .Name = "DSProxy.Patch", .Activity = NKikimrServices::TActivity::BS_PROXY_PATCH_ACTOR, @@ -749,8 +748,8 @@ struct TBlobStorageGroupPatchParameters { IActor* CreateBlobStorageGroupPatchRequest(TBlobStorageGroupPatchParameters params); struct TBlobStorageGroupMultiGetParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { 
.LogComponent = NKikimrServices::BS_PROXY_MULTIGET, .Name = "DSProxy.MultiGet", .Activity = NKikimrServices::TActivity::BS_PROXY_MULTIGET_ACTOR, @@ -760,8 +759,8 @@ struct TBlobStorageGroupMultiGetParameters { IActor* CreateBlobStorageGroupMultiGetRequest(TBlobStorageGroupMultiGetParameters params); struct TBlobStorageGroupRestoreGetParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_INDEXRESTOREGET, .Name = "DSProxy.IndexRestoreGet", .Activity = NKikimrServices::TActivity::BS_PROXY_INDEXRESTOREGET_ACTOR, @@ -770,8 +769,8 @@ struct TBlobStorageGroupRestoreGetParameters { IActor* CreateBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters params); struct TBlobStorageGroupDiscoverParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_DISCOVER, .Name = "DSProxy.Discover", .Activity = NKikimrServices::TActivity::BS_GROUP_DISCOVER, @@ -782,8 +781,8 @@ IActor* CreateBlobStorageGroupMirror3dcDiscoverRequest(TBlobStorageGroupDiscover IActor* CreateBlobStorageGroupMirror3of4DiscoverRequest(TBlobStorageGroupDiscoverParameters params); struct TBlobStorageGroupCollectGarbageParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_COLLECT, .Name = "DSProxy.CollectGarbage", .Activity = NKikimrServices::TActivity::BS_GROUP_COLLECT_GARBAGE, @@ -792,8 +791,8 @@ struct TBlobStorageGroupCollectGarbageParameters { IActor* CreateBlobStorageGroupCollectGarbageRequest(TBlobStorageGroupCollectGarbageParameters params); struct TBlobStorageGroupMultiCollectParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_MULTICOLLECT, .Name = "DSProxy.MultiCollect", .Activity = NKikimrServices::TActivity::BS_PROXY_MULTICOLLECT_ACTOR, @@ -802,8 +801,8 @@ struct TBlobStorageGroupMultiCollectParameters { IActor* CreateBlobStorageGroupMultiCollectRequest(TBlobStorageGroupMultiCollectParameters params); struct TBlobStorageGroupBlockParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_BLOCK, .Name = "DSProxy.Block", .Activity = NKikimrServices::TActivity::BS_GROUP_BLOCK, @@ -812,8 +811,8 @@ struct TBlobStorageGroupBlockParameters { IActor* CreateBlobStorageGroupBlockRequest(TBlobStorageGroupBlockParameters params); struct TBlobStorageGroupStatusParameters { - 
TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_STATUS, .Name = "DSProxy.Status", .Activity = NKikimrServices::TActivity::BS_PROXY_STATUS_ACTOR, @@ -822,8 +821,9 @@ struct TBlobStorageGroupStatusParameters { IActor* CreateBlobStorageGroupStatusRequest(TBlobStorageGroupStatusParameters params); struct TBlobStorageGroupAssimilateParameters { - TBlobStorageGroupRequestActor::TCommonParameters Common; - TBlobStorageGroupRequestActor::TTypeSpecificParameters TypeSpecific = { + TBlobStorageGroupRequestActor::TCommonParameters Common; + TBlobStorageGroupRequestActor + ::TTypeSpecificParameters TypeSpecific = { .LogComponent = NKikimrServices::BS_PROXY_ASSIMILATE, .Name = "DSProxy.Assimilate", .Activity = NKikimrServices::TActivity::BS_GROUP_ASSIMILATE, diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp index c44a8534b091..026f86fd87c6 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp @@ -384,6 +384,10 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor& mon) { + return mon->ActiveGet; + } + static constexpr ERequestType RequestType() { return ERequestType::Get; } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp index 71442f3cad04..db036df7a0f1 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp @@ -510,6 +510,7 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActorTactic) , Stats(std::move(params.Stats)) + , IsMultiPutMode(false) , IncarnationRecords(Info->GetTotalVDisksNum()) , ExpiredVDiskSet(&Info->GetTopology()) { @@ -531,20 +532,12 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActorGetTotalVDisksNum()) - , ExpiredVDiskSet(&info->GetTopology()) -======= , TimeStatsEnabled(params.TimeStatsEnabled) , Tactic(params.Tactic) , Stats(std::move(params.Stats)) + , IsMultiPutMode(true) , IncarnationRecords(Info->GetTotalVDisksNum()) , ExpiredVDiskSet(&Info->GetTopology()) ->>>>>>> 4efd4715e9... 
Wrap ctor arguments in structs (#7631) { Y_DEBUG_ABORT_UNLESS(params.Events.size() <= MaxBatchedPutRequests); for (auto &ev : params.Events) { From f57bdce54c052f78ab9234e7b4afad6a55f532c5 Mon Sep 17 00:00:00 2001 From: Sergey Belyakov Date: Tue, 13 Aug 2024 22:12:36 +0300 Subject: [PATCH 06/13] Add ICB-configurable parameters for accelerates (#7534) --- ydb/core/base/blobstorage.h | 5 + ydb/core/blobstorage/backpressure/common.h | 36 + ydb/core/blobstorage/backpressure/event.cpp | 2 +- ydb/core/blobstorage/backpressure/event.h | 2 +- ydb/core/blobstorage/backpressure/queue.cpp | 3 +- ydb/core/blobstorage/backpressure/queue.h | 16 +- .../queue_backpressure_client.cpp | 10 +- .../backpressure/queue_backpressure_client.h | 3 +- ydb/core/blobstorage/dsproxy/dsproxy.h | 25 +- .../dsproxy/dsproxy_blackboard.cpp | 31 +- .../blobstorage/dsproxy/dsproxy_blackboard.h | 14 +- ydb/core/blobstorage/dsproxy/dsproxy_get.cpp | 26 +- .../blobstorage/dsproxy/dsproxy_get_impl.cpp | 33 +- .../blobstorage/dsproxy/dsproxy_get_impl.h | 12 +- ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp | 42 +- ydb/core/blobstorage/dsproxy/dsproxy_impl.h | 12 +- ydb/core/blobstorage/dsproxy/dsproxy_put.cpp | 34 +- .../blobstorage/dsproxy/dsproxy_put_impl.cpp | 10 +- .../blobstorage/dsproxy/dsproxy_put_impl.h | 11 +- .../blobstorage/dsproxy/dsproxy_request.cpp | 20 +- .../blobstorage/dsproxy/dsproxy_state.cpp | 10 +- .../dsproxy/dsproxy_strategy_accelerate_put.h | 4 +- .../dsproxy_strategy_accelerate_put_m3dc.h | 16 +- .../dsproxy/dsproxy_strategy_base.cpp | 9 +- .../dsproxy/dsproxy_strategy_base.h | 5 +- .../dsproxy/dsproxy_strategy_get_bold.h | 4 +- .../dsproxy/dsproxy_strategy_get_m3dc_basic.h | 12 +- .../dsproxy_strategy_get_m3dc_restore.h | 5 +- .../dsproxy/dsproxy_strategy_get_m3of4.h | 4 +- .../dsproxy_strategy_get_min_iops_block.h | 6 +- .../dsproxy_strategy_get_min_iops_mirror.h | 4 +- .../dsproxy/dsproxy_strategy_put_m3dc.h | 5 +- .../dsproxy/dsproxy_strategy_put_m3of4.h | 4 +- .../dsproxy/dsproxy_strategy_restore.h | 10 +- .../blobstorage/dsproxy/group_sessions.cpp | 4 +- ydb/core/blobstorage/dsproxy/group_sessions.h | 47 +- .../dsproxy/ut/dsproxy_env_mock_ut.h | 10 +- .../blobstorage/dsproxy/ut/dsproxy_get_ut.cpp | 20 +- .../blobstorage/dsproxy/ut/dsproxy_put_ut.cpp | 9 +- .../blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp | 15 +- .../dsproxy_fault_tolerance_ut_runtime.h | 10 +- .../dsproxy/ut_strategy/strategy_ut.cpp | 6 +- ydb/core/blobstorage/nodewarden/node_warden.h | 1 + .../nodewarden/node_warden_impl.cpp | 3 + .../blobstorage/nodewarden/node_warden_impl.h | 5 + .../nodewarden/node_warden_proxy.cpp | 29 +- .../ut_blobstorage/acceleration.cpp | 624 +++++++++++++++--- .../ut_blobstorage/blob_depot_test_env.h | 27 +- .../blob_depot_test_functions.cpp | 28 +- .../blobstorage/ut_blobstorage/lib/common.h | 42 ++ ydb/core/blobstorage/ut_blobstorage/lib/env.h | 6 + ydb/core/blobstorage/ut_group/main.cpp | 11 +- .../vdisk/balance/balancing_actor.cpp | 2 +- .../blobstorage/vdisk/common/vdisk_config.h | 1 + .../blobstorage/vdisk/common/vdisk_queues.h | 5 +- .../vdisk/repl/blobstorage_repl.cpp | 2 +- .../vdisk/scrub/blob_recovery_queue.cpp | 2 +- .../vdisk/skeleton/blobstorage_skeleton.cpp | 3 +- ydb/core/cms/json_proxy_proto.h | 2 + ydb/core/protos/config.proto | 14 + 60 files changed, 1065 insertions(+), 308 deletions(-) diff --git a/ydb/core/base/blobstorage.h b/ydb/core/base/blobstorage.h index d8ebabbf6608..68e1b91cc273 100644 --- a/ydb/core/base/blobstorage.h +++ b/ydb/core/base/blobstorage.h @@ -492,6 +492,8 @@ struct 
TEvBlobStorage { EvInplacePatch, EvAssimilate, + EvGetQueuesInfo, // for debugging purposes + // EvPutResult = EvPut + 512, /// 268 632 576 EvGetResult, @@ -506,6 +508,8 @@ struct TEvBlobStorage { EvInplacePatchResult, EvAssimilateResult, + EvQueuesInfo, // for debugging purposes + // proxy <-> vdisk interface EvVPut = EvPut + 2 * 512, /// 268 633 088 EvVGet, @@ -873,6 +877,7 @@ struct TEvBlobStorage { EvRunActor = EvPut + 15 * 512, EvVMockCtlRequest, EvVMockCtlResponse, + EvDelayedMessageWrapper, // incremental huge blob keeper EvIncrHugeInit = EvPut + 17 * 512, diff --git a/ydb/core/blobstorage/backpressure/common.h b/ydb/core/blobstorage/backpressure/common.h index 6539a5db84e8..fc94ac6b1fce 100644 --- a/ydb/core/blobstorage/backpressure/common.h +++ b/ydb/core/blobstorage/backpressure/common.h @@ -15,3 +15,39 @@ #define QLOG_DEBUG_S(marker, arg) QLOG_LOG_S(marker, NActors::NLog::PRI_DEBUG , arg) LWTRACE_USING(BLOBSTORAGE_PROVIDER); + +namespace NKikimr::NBsQueue { + +// Special timer for debug purposes, which works with virtual time of TTestActorSystem +struct TActivationContextTimer { + TActivationContextTimer() + : CreationTimestamp(NActors::TActivationContext::Monotonic()) + {} + + double Passed() const { + return (NActors::TActivationContext::Monotonic() - CreationTimestamp).SecondsFloat(); + } + + TMonotonic CreationTimestamp; +}; + +struct TBSQueueTimer { + TBSQueueTimer(bool useActorSystemTime) + { + if (useActorSystemTime) { + Timer.emplace(); + } else { + Timer.emplace(); + } + } + + std::variant Timer; + + double Passed() const { + return std::visit([](const auto& timer) -> double { + return timer.Passed(); + }, Timer); + } +}; + +} // namespace NKikimr::NBsQueue \ No newline at end of file diff --git a/ydb/core/blobstorage/backpressure/event.cpp b/ydb/core/blobstorage/backpressure/event.cpp index 4426704fe6bd..db9bf4078cd7 100644 --- a/ydb/core/blobstorage/backpressure/event.cpp +++ b/ydb/core/blobstorage/backpressure/event.cpp @@ -27,7 +27,7 @@ IEventBase *TEventHolder::MakeErrorReply(NKikimrProto::EReplyStatus status, cons void TEventHolder::SendToVDisk(const TActorContext& ctx, const TActorId& remoteVDisk, ui64 queueCookie, ui64 msgId, ui64 sequenceId, bool sendMeCostSettings, NWilson::TTraceId traceId, const NBackpressure::TQueueClientId& clientId, - const THPTimer& processingTimer) { + const TBSQueueTimer& processingTimer) { // check that we are not discarded yet Y_ABORT_UNLESS(Type != 0); diff --git a/ydb/core/blobstorage/backpressure/event.h b/ydb/core/blobstorage/backpressure/event.h index 0c77f3c4a188..7400a9ce8850 100644 --- a/ydb/core/blobstorage/backpressure/event.h +++ b/ydb/core/blobstorage/backpressure/event.h @@ -142,7 +142,7 @@ class TEventHolder { void SendToVDisk(const TActorContext& ctx, const TActorId& remoteVDisk, ui64 queueCookie, ui64 msgId, ui64 sequenceId, bool sendMeCostSettings, NWilson::TTraceId traceId, const NBackpressure::TQueueClientId& clientId, - const THPTimer& processingTimer); + const TBSQueueTimer& processingTimer); void Discard(); }; diff --git a/ydb/core/blobstorage/backpressure/queue.cpp b/ydb/core/blobstorage/backpressure/queue.cpp index b2402a32c671..da7433ff8cc3 100644 --- a/ydb/core/blobstorage/backpressure/queue.cpp +++ b/ydb/core/blobstorage/backpressure/queue.cpp @@ -4,7 +4,7 @@ namespace NKikimr::NBsQueue { TBlobStorageQueue::TBlobStorageQueue(const TIntrusivePtr<::NMonitoring::TDynamicCounters>& counters, TString& logPrefix, const TBSProxyContextPtr& bspctx, const NBackpressure::TQueueClientId& clientId, ui32 
interconnectChannel, - const TBlobStorageGroupType& gType, NMonitoring::TCountableBase::EVisibility visibility) + const TBlobStorageGroupType& gType, NMonitoring::TCountableBase::EVisibility visibility, bool useActorSystemTime) : Queues(bspctx) , WindowSize(0) , InFlightCost(0) @@ -16,6 +16,7 @@ TBlobStorageQueue::TBlobStorageQueue(const TIntrusivePtr<::NMonitoring::TDynamic , ClientId(clientId) , BytesWaiting(0) , InterconnectChannel(interconnectChannel) + , UseActorSystemTime(useActorSystemTime) // use parent group visibility , QueueWaitingItems(counters->GetCounter("QueueWaitingItems", false, visibility)) , QueueWaitingBytes(counters->GetCounter("QueueWaitingBytes", false, visibility)) diff --git a/ydb/core/blobstorage/backpressure/queue.h b/ydb/core/blobstorage/backpressure/queue.h index 61d9843f1451..b0acd5383758 100644 --- a/ydb/core/blobstorage/backpressure/queue.h +++ b/ydb/core/blobstorage/backpressure/queue.h @@ -51,7 +51,8 @@ class TBlobStorageQueue { const ui64 QueueCookie; ui64 Cost; bool DirtyCost; - THPTimer ProcessingTimer; + TBSQueueTimer ProcessingTimer; + TTrackableList::iterator Iterator; template @@ -59,7 +60,7 @@ class TBlobStorageQueue { const ::NMonitoring::TDynamicCounters::TCounterPtr& serItems, const ::NMonitoring::TDynamicCounters::TCounterPtr& serBytes, const TBSProxyContextPtr& bspctx, ui32 interconnectChannel, - bool local) + bool local, bool useActorSystemTime) : Queue(EItemQueue::NotSet) , CostEssence(*event->Get()) , Span(TWilson::VDiskTopLevel, std::move(event->TraceId), "Backpressure.InFlight") @@ -70,6 +71,7 @@ class TBlobStorageQueue { , QueueCookie(RandomNumber()) , Cost(0) , DirtyCost(true) + , ProcessingTimer(useActorSystemTime) { if (Span) { Span @@ -129,6 +131,8 @@ class TBlobStorageQueue { const ui32 InterconnectChannel; + const bool UseActorSystemTime; + public: ::NMonitoring::TDynamicCounters::TCounterPtr QueueWaitingItems; ::NMonitoring::TDynamicCounters::TCounterPtr QueueWaitingBytes; @@ -156,7 +160,8 @@ class TBlobStorageQueue { TBlobStorageQueue(const TIntrusivePtr<::NMonitoring::TDynamicCounters>& counters, TString& logPrefix, const TBSProxyContextPtr& bspctx, const NBackpressure::TQueueClientId& clientId, ui32 interconnectChannel, const TBlobStorageGroupType &gType, - NMonitoring::TCountableBase::EVisibility visibility = NMonitoring::TCountableBase::EVisibility::Public); + NMonitoring::TCountableBase::EVisibility visibility = NMonitoring::TCountableBase::EVisibility::Public, + bool useActorSystemTime = false); ~TBlobStorageQueue(); @@ -213,7 +218,8 @@ class TBlobStorageQueue { TItemList::iterator newIt; if (Queues.Unused.empty()) { newIt = Queues.Waiting.emplace(Queues.Waiting.end(), event, deadline, - QueueSerializedItems, QueueSerializedBytes, BSProxyCtx, InterconnectChannel, local); + QueueSerializedItems, QueueSerializedBytes, BSProxyCtx, InterconnectChannel, local, + UseActorSystemTime); ++*QueueSize; } else { newIt = Queues.Unused.begin(); @@ -222,7 +228,7 @@ class TBlobStorageQueue { TItem& item = *newIt; item.~TItem(); new(&item) TItem(event, deadline, QueueSerializedItems, QueueSerializedBytes, BSProxyCtx, - InterconnectChannel, local); + InterconnectChannel, local, UseActorSystemTime); } newIt->Iterator = newIt; diff --git a/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp b/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp index f361d6a80782..d8c2c6db742c 100644 --- a/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp +++ b/ydb/core/blobstorage/backpressure/queue_backpressure_client.cpp 
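The queue changes above hinge on one small mechanism: every queued item owns a processing timer, and the clock behind that timer is chosen once at construction via the new useActorSystemTime flag, so latency accounting keeps working under the virtual clock of TTestActorSystem. A minimal self-contained sketch of the pattern follows; TWallClockTimer and TVirtualClockTimer are simplified stand-ins for THPTimer and TActivationContextTimer (the real virtual clock reads TActivationContext::Monotonic(), which only exists inside an actor system), so treat this as an illustration rather than the real code.

#include <chrono>
#include <variant>

struct TWallClockTimer {                       // stand-in for THPTimer (real wall-clock time)
    std::chrono::steady_clock::time_point Start = std::chrono::steady_clock::now();
    double Passed() const {
        return std::chrono::duration<double>(std::chrono::steady_clock::now() - Start).count();
    }
};

struct TVirtualClockTimer {                    // stand-in for TActivationContextTimer
    // In the real code both the creation timestamp and "now" come from the actor
    // system's monotonic clock, so tests under TTestActorSystem observe virtual time.
    double CreationSeconds = 0.0;
    double NowSeconds() const { return CreationSeconds; }   // placeholder clock source
    double Passed() const { return NowSeconds() - CreationSeconds; }
};

struct TQueueTimerSketch {                     // mirrors TBSQueueTimer
    std::variant<TWallClockTimer, TVirtualClockTimer> Timer;

    explicit TQueueTimerSketch(bool useActorSystemTime) {
        if (useActorSystemTime) {
            Timer.emplace<TVirtualClockTimer>();
        } else {
            Timer.emplace<TWallClockTimer>();
        }
    }

    double Passed() const {
        return std::visit([](const auto& t) -> double { return t.Passed(); }, Timer);
    }
};

In the actual patch the selection reaches TBlobStorageQueue::TItem through the new UseActorSystemTime member, so each item's ProcessingTimer measures the same notion of time as the rest of the proxy.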
@@ -77,12 +77,13 @@ class TVDiskBackpressureClientActor : public TActorBootstrapped& counters, const TBSProxyContextPtr& bspctx, const NBackpressure::TQueueClientId& clientId, const TString& queueName, ui32 interconnectChannel, bool /*local*/, TDuration watchdogTimeout, - TIntrusivePtr &flowRecord, NMonitoring::TCountableBase::EVisibility visibility) + TIntrusivePtr &flowRecord, NMonitoring::TCountableBase::EVisibility visibility, + bool useActorSystemTime) : BSProxyCtx(bspctx) , QueueName(queueName) , Counters(counters->GetSubgroup("queue", queueName)) , Queue(Counters, LogPrefix, bspctx, clientId, interconnectChannel, - (info ? info->Type : TErasureType::ErasureNone), visibility) + (info ? info->Type : TErasureType::ErasureNone), visibility, useActorSystemTime) , VDiskIdShort(vdiskId) , QueueId(queueId) , QueueWatchdogTimeout(watchdogTimeout) @@ -975,9 +976,10 @@ IActor* CreateVDiskBackpressureClient(const TIntrusivePtr NKikimrBlobStorage::EVDiskQueueId queueId,const TIntrusivePtr<::NMonitoring::TDynamicCounters>& counters, const TBSProxyContextPtr& bspctx, const NBackpressure::TQueueClientId& clientId, const TString& queueName, ui32 interconnectChannel, bool local, TDuration watchdogTimeout, - TIntrusivePtr &flowRecord, NMonitoring::TCountableBase::EVisibility visibility) { + TIntrusivePtr &flowRecord, NMonitoring::TCountableBase::EVisibility visibility, + bool useActorSystemTime) { return new NBsQueue::TVDiskBackpressureClientActor(info, vdiskId, queueId, counters, bspctx, clientId, queueName, - interconnectChannel, local, watchdogTimeout, flowRecord, visibility); + interconnectChannel, local, watchdogTimeout, flowRecord, visibility, useActorSystemTime); } } // NKikimr diff --git a/ydb/core/blobstorage/backpressure/queue_backpressure_client.h b/ydb/core/blobstorage/backpressure/queue_backpressure_client.h index 0b80053d6133..722874818838 100644 --- a/ydb/core/blobstorage/backpressure/queue_backpressure_client.h +++ b/ydb/core/blobstorage/backpressure/queue_backpressure_client.h @@ -50,6 +50,7 @@ namespace NKikimr { NKikimrBlobStorage::EVDiskQueueId queueId,const TIntrusivePtr<::NMonitoring::TDynamicCounters>& counters, const TBSProxyContextPtr& bspctx, const NBackpressure::TQueueClientId& clientId, const TString& queueName, ui32 interconnectChannel, bool local, TDuration watchdogTimeout, - TIntrusivePtr &flowRecord, NMonitoring::TCountableBase::EVisibility visibility); + TIntrusivePtr &flowRecord, NMonitoring::TCountableBase::EVisibility visibility, + bool useActorSystemTime = false); } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy.h b/ydb/core/blobstorage/dsproxy/dsproxy.h index 17eb1d8fc938..21a9ab43011c 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy.h @@ -55,6 +55,9 @@ const ui32 MaskSizeBits = 32; constexpr bool DefaultEnablePutBatching = true; constexpr bool DefaultEnableVPatch = false; +constexpr float DefaultSlowDiskThreshold = 2; +constexpr float DefaultPredictedDelayMultiplier = 1; + constexpr bool WithMovingPatchRequestToStaticNode = true; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -168,6 +171,11 @@ inline void SetExecutionRelay(IEventBase& ev, std::shared_ptr class TBlobStorageGroupRequestActor : public TActor { public: @@ -696,6 +704,7 @@ struct TBlobStorageGroupPutParameters { bool TimeStatsEnabled; TDiskResponsivenessTracker::TPerDiskStatsPtr Stats; bool EnableRequestMod3x3ForMinLatency; + TAccelerationParams AccelerationParams; 
}; IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupPutParameters params); @@ -713,6 +722,7 @@ struct TBlobStorageGroupMultiPutParameters { NKikimrBlobStorage::EPutHandleClass HandleClass; TEvBlobStorage::TEvPut::ETactic Tactic; bool EnableRequestMod3x3ForMinLatency; + TAccelerationParams AccelerationParams; static ui32 CalculateRestartCounter(TBatchedVec& events) { ui32 maxRestarts = 0; @@ -732,6 +742,7 @@ struct TBlobStorageGroupGetParameters { .Activity = NKikimrServices::TActivity::BS_PROXY_GET_ACTOR, }; TNodeLayoutInfoPtr NodeLayout; + TAccelerationParams AccelerationParams; }; IActor* CreateBlobStorageGroupGetRequest(TBlobStorageGroupGetParameters params); @@ -833,12 +844,20 @@ IActor* CreateBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParam IActor* CreateBlobStorageGroupEjectedProxy(ui32 groupId, TIntrusivePtr &nodeMon); +struct TBlobStorageProxyParameters { + bool UseActorSystemTimeInBSQueue = false; + + const TControlWrapper& EnablePutBatching; + const TControlWrapper& EnableVPatch; + const TControlWrapper& SlowDiskThreshold; + const TControlWrapper& PredictedDelayMultiplier; +}; + IActor* CreateBlobStorageGroupProxyConfigured(TIntrusivePtr&& info, bool forceWaitAllDrives, TIntrusivePtr &nodeMon, - TIntrusivePtr&& storagePoolCounters, const TControlWrapper &enablePutBatching, - const TControlWrapper &enableVPatch); + TIntrusivePtr&& storagePoolCounters, const TBlobStorageProxyParameters& params); IActor* CreateBlobStorageGroupProxyUnconfigured(ui32 groupId, TIntrusivePtr &nodeMon, - const TControlWrapper &enablePutBatching, const TControlWrapper &enableVPatch); + const TBlobStorageProxyParameters& params); }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp index 397a2cbd3de3..1de899664b5c 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp @@ -178,12 +178,17 @@ ui64 TBlobState::GetPredictedDelayNs(const TBlobStorageGroupInfo &info, TGroupQu } void TBlobState::GetWorstPredictedDelaysNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, - NKikimrBlobStorage::EVDiskQueueId queueId, ui32 nWorst, TDiskDelayPredictions *outNWorst) const { + NKikimrBlobStorage::EVDiskQueueId queueId, TDiskDelayPredictions *outNWorst, + double multiplier) const { outNWorst->resize(Disks.size()); for (ui32 diskIdx = 0; diskIdx < Disks.size(); ++diskIdx) { - (*outNWorst)[diskIdx] = { GetPredictedDelayNs(info, groupQueues, diskIdx, queueId), diskIdx }; + (*outNWorst)[diskIdx] = { + static_cast(GetPredictedDelayNs(info, groupQueues, diskIdx, queueId) * multiplier), + diskIdx + }; } - std::partial_sort(outNWorst->begin(), outNWorst->begin() + std::min(nWorst, (ui32)Disks.size()), outNWorst->end()); + ui32 sortedPrefixSize = std::min(3u, (ui32)Disks.size()); + std::partial_sort(outNWorst->begin(), outNWorst->begin() + sortedPrefixSize, outNWorst->end()); } bool TBlobState::HasWrittenQuorum(const TBlobStorageGroupInfo& info, const TBlobStorageGroupInfo::TGroupVDisks& expired) const { @@ -361,7 +366,8 @@ void TBlackboard::AddErrorResponse(const TLogoBlobID &id, ui32 orderNumber) { } EStrategyOutcome TBlackboard::RunStrategies(TLogContext &logCtx, const TStackVec& s, - TBatchedVec *finished, const TBlobStorageGroupInfo::TGroupVDisks *expired) { + const TAccelerationParams& accelerationParams, TBatchedVec *finished, + const TBlobStorageGroupInfo::TGroupVDisks *expired) { for (auto it = BlobStates.begin(); it != BlobStates.end(); ) { 
auto& blob = it->second; if (!std::exchange(blob.IsChanged, false)) { @@ -373,7 +379,7 @@ EStrategyOutcome TBlackboard::RunStrategies(TLogContext &logCtx, const TStackVec NKikimrProto::EReplyStatus status = NKikimrProto::OK; TString errorReason; for (IStrategy *strategy : s) { - switch (auto res = strategy->Process(logCtx, blob, *Info, *this, GroupDiskRequests)) { + switch (auto res = strategy->Process(logCtx, blob, *Info, *this, GroupDiskRequests, accelerationParams)) { case EStrategyOutcome::IN_PROGRESS: status = NKikimrProto::UNKNOWN; break; @@ -415,8 +421,9 @@ EStrategyOutcome TBlackboard::RunStrategies(TLogContext &logCtx, const TStackVec } EStrategyOutcome TBlackboard::RunStrategy(TLogContext &logCtx, const IStrategy& s, - TBatchedVec *finished, const TBlobStorageGroupInfo::TGroupVDisks *expired) { - return RunStrategies(logCtx, {const_cast(&s)}, finished, expired); + const TAccelerationParams& accelerationParams, TBatchedVec *finished, + const TBlobStorageGroupInfo::TGroupVDisks *expired) { + return RunStrategies(logCtx, {const_cast(&s)}, accelerationParams, finished, expired); } TBlobState& TBlackboard::GetState(const TLogoBlobID &id) { @@ -458,13 +465,17 @@ void TBlackboard::ReportPartMapStatus(const TLogoBlobID &id, ssize_t partMapInde } void TBlackboard::GetWorstPredictedDelaysNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, - NKikimrBlobStorage::EVDiskQueueId queueId, ui32 nWorst, TDiskDelayPredictions *outNWorst) const { + NKikimrBlobStorage::EVDiskQueueId queueId, TDiskDelayPredictions *outNWorst, + double multiplier) const { ui32 totalVDisks = info.GetTotalVDisksNum(); outNWorst->resize(totalVDisks); for (ui32 orderNumber = 0; orderNumber < totalVDisks; ++orderNumber) { - (*outNWorst)[orderNumber] = { groupQueues.GetPredictedDelayNsByOrderNumber(orderNumber, queueId), orderNumber }; + (*outNWorst)[orderNumber] = { + static_cast(groupQueues.GetPredictedDelayNsByOrderNumber(orderNumber, queueId) * multiplier), + orderNumber + }; } - std::partial_sort(outNWorst->begin(), outNWorst->begin() + std::min(nWorst, totalVDisks), outNWorst->end()); + std::partial_sort(outNWorst->begin(), outNWorst->begin() + std::min(3u, totalVDisks), outNWorst->end()); } void TBlackboard::RegisterBlobForPut(const TLogoBlobID& id, size_t blobIdx) { diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.h b/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.h index 54d53302cdf5..1cf53be9675e 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.h @@ -97,7 +97,8 @@ struct TBlobState { ui64 GetPredictedDelayNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, ui32 diskIdxInSubring, NKikimrBlobStorage::EVDiskQueueId queueId) const; void GetWorstPredictedDelaysNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, - NKikimrBlobStorage::EVDiskQueueId queueId, ui32 nWorst, TDiskDelayPredictions *outNWorst) const; + NKikimrBlobStorage::EVDiskQueueId queueId, TDiskDelayPredictions *outNWorst, + double multipler = 1) const; TString ToString() const; bool HasWrittenQuorum(const TBlobStorageGroupInfo& info, const TBlobStorageGroupInfo::TGroupVDisks& expired) const; @@ -158,7 +159,8 @@ class IStrategy { public: virtual ~IStrategy() = default; virtual EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard &blackboard, TGroupDiskRequests &groupDiskRequests) = 0; + TBlackboard &blackboard, TGroupDiskRequests &groupDiskRequests, + const TAccelerationParams& 
accelerationParams) = 0; }; struct TBlackboard { @@ -201,14 +203,16 @@ struct TBlackboard { void AddNotYetResponse(const TLogoBlobID &id, ui32 orderNumber); EStrategyOutcome RunStrategies(TLogContext& logCtx, const TStackVec& strategies, - TBatchedVec *finished = nullptr, const TBlobStorageGroupInfo::TGroupVDisks *expired = nullptr); - EStrategyOutcome RunStrategy(TLogContext &logCtx, const IStrategy& s, TBatchedVec *finished = nullptr, + const TAccelerationParams& accelerationParams, TBatchedVec *finished = nullptr, const TBlobStorageGroupInfo::TGroupVDisks *expired = nullptr); + EStrategyOutcome RunStrategy(TLogContext &logCtx, const IStrategy& s, const TAccelerationParams& accelerationParams, + TBatchedVec *finished = nullptr, const TBlobStorageGroupInfo::TGroupVDisks *expired = nullptr); TBlobState& GetState(const TLogoBlobID &id); ssize_t AddPartMap(const TLogoBlobID &id, ui32 diskOrderNumber, ui32 requestIndex); void ReportPartMapStatus(const TLogoBlobID &id, ssize_t partMapIndex, ui32 responseIndex, NKikimrProto::EReplyStatus status); void GetWorstPredictedDelaysNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, - NKikimrBlobStorage::EVDiskQueueId queueId, ui32 nWorst, TDiskDelayPredictions *outNWorst) const; + NKikimrBlobStorage::EVDiskQueueId queueId, TDiskDelayPredictions *outNWorst, + double multiplier = 1) const; TString ToString() const; void ChangeAll() { diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp index 026f86fd87c6..86542ed2b1d4 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp @@ -57,6 +57,8 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActorGet()->CauseIdx); @@ -315,13 +317,13 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor StartTime) ? (now - StartTime) : TDuration::MilliSeconds(0); - if (timeSinceStart.MicroSeconds() < timeToAccelerateUs) { + TInstant nextAcceleration = StartTime + timeToAccelerate; + if (nextAcceleration > now) { ui64 causeIdx = RootCauseTrack.RegisterAccelerate(); - Schedule(TDuration::MicroSeconds(timeToAccelerateUs - timeSinceStart.MicroSeconds()), - new TEvAccelerateGet(causeIdx)); + Schedule(nextAcceleration - now, new TEvAccelerateGet(causeIdx)); IsGetAccelerateScheduled = true; } else { AccelerateGet(); @@ -334,13 +336,13 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor StartTimePut) ? 
(now - StartTimePut) : TDuration::MilliSeconds(0); - if (timeSinceStart.MicroSeconds() < timeToAccelerateUs) { + TInstant nextAcceleration = StartTime + timeToAccelerate; + if (nextAcceleration > now) { ui64 causeIdx = RootCauseTrack.RegisterAccelerate(); - Schedule(TDuration::MicroSeconds(timeToAccelerateUs - timeSinceStart.MicroSeconds()), - new TEvAcceleratePut(causeIdx)); + Schedule(nextAcceleration - now, new TEvAcceleratePut(causeIdx)); IsPutAccelerateScheduled = true; } else { AcceleratePut(); @@ -392,16 +394,18 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActorOrbit)) , Deadline(params.Common.Event->Deadline) , StartTime(params.Common.Now) , StartTimePut(StartTime) , GroupSize(Info->Type.BlobSubgroupSize()) , ReportedBytes(0) + , AccelerationParams(params.AccelerationParams) { ReportBytes(sizeof(*this)); MaxSaneRequests = params.Common.Event->QuerySize * Info->Type.TotalPartCount() * diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp index 18c63e66de73..5b41baa16d80 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.cpp @@ -159,29 +159,28 @@ void TGetImpl::PrepareReply(NKikimrProto::EReplyStatus status, TString errorReas } -ui64 TGetImpl::GetTimeToAccelerateNs(TLogContext &logCtx, NKikimrBlobStorage::EVDiskQueueId queueId, ui32 nthWorst) { +ui64 TGetImpl::GetTimeToAccelerateNs(TLogContext &logCtx, NKikimrBlobStorage::EVDiskQueueId queueId) { Y_UNUSED(logCtx); // Find the slowest disk TDiskDelayPredictions worstDisks; if (Blackboard.BlobStates.size() == 1) { Blackboard.BlobStates.begin()->second.GetWorstPredictedDelaysNs( - *Info, *Blackboard.GroupQueues, queueId, nthWorst, &worstDisks); + *Info, *Blackboard.GroupQueues, queueId, &worstDisks, + AccelerationParams.PredictedDelayMultiplier); } else { Blackboard.GetWorstPredictedDelaysNs( - *Info, *Blackboard.GroupQueues, queueId, nthWorst, &worstDisks); + *Info, *Blackboard.GroupQueues, queueId, &worstDisks, + AccelerationParams.PredictedDelayMultiplier); } - nthWorst = std::min(nthWorst, (ui32)worstDisks.size() - 1); - return worstDisks[nthWorst].PredictedNs; + return worstDisks[std::min(3u, (ui32)worstDisks.size() - 1)].PredictedNs; } -ui64 TGetImpl::GetTimeToAccelerateGetNs(TLogContext &logCtx, ui32 acceleratesSent) { - Y_DEBUG_ABORT_UNLESS(acceleratesSent < 2); - return GetTimeToAccelerateNs(logCtx, HandleClassToQueueId(Blackboard.GetHandleClass), 2 - acceleratesSent); +ui64 TGetImpl::GetTimeToAccelerateGetNs(TLogContext &logCtx) { + return GetTimeToAccelerateNs(logCtx, HandleClassToQueueId(Blackboard.GetHandleClass)); } -ui64 TGetImpl::GetTimeToAcceleratePutNs(TLogContext &logCtx, ui32 acceleratesSent) { - Y_DEBUG_ABORT_UNLESS(acceleratesSent < 2); - return GetTimeToAccelerateNs(logCtx, HandleClassToQueueId(Blackboard.PutHandleClass), 2 - acceleratesSent); +ui64 TGetImpl::GetTimeToAcceleratePutNs(TLogContext &logCtx) { + return GetTimeToAccelerateNs(logCtx, HandleClassToQueueId(Blackboard.PutHandleClass)); } TString TGetImpl::DumpFullState() const { @@ -328,13 +327,13 @@ EStrategyOutcome TGetImpl::RunBoldStrategy(TLogContext &logCtx) { if (MustRestoreFirst) { strategies.push_back(&s2); } - return Blackboard.RunStrategies(logCtx, strategies); + return Blackboard.RunStrategies(logCtx, strategies, AccelerationParams); } EStrategyOutcome TGetImpl::RunMirror3dcStrategy(TLogContext &logCtx) { return MustRestoreFirst - ? 
Blackboard.RunStrategy(logCtx, TMirror3dcGetWithRestoreStrategy()) - : Blackboard.RunStrategy(logCtx, TMirror3dcBasicGetStrategy(NodeLayout, PhantomCheck)); + ? Blackboard.RunStrategy(logCtx, TMirror3dcGetWithRestoreStrategy(), AccelerationParams) + : Blackboard.RunStrategy(logCtx, TMirror3dcBasicGetStrategy(NodeLayout, PhantomCheck), AccelerationParams); } EStrategyOutcome TGetImpl::RunMirror3of4Strategy(TLogContext &logCtx) { @@ -345,7 +344,7 @@ EStrategyOutcome TGetImpl::RunMirror3of4Strategy(TLogContext &logCtx) { if (MustRestoreFirst) { strategies.push_back(&s2); } - return Blackboard.RunStrategies(logCtx, strategies); + return Blackboard.RunStrategies(logCtx, strategies, AccelerationParams); } EStrategyOutcome TGetImpl::RunStrategies(TLogContext &logCtx) { @@ -356,9 +355,9 @@ EStrategyOutcome TGetImpl::RunStrategies(TLogContext &logCtx) { } else if (MustRestoreFirst || PhantomCheck) { return RunBoldStrategy(logCtx); } else if (Info->Type.ErasureFamily() == TErasureType::ErasureParityBlock) { - return Blackboard.RunStrategy(logCtx, TMinIopsBlockStrategy()); + return Blackboard.RunStrategy(logCtx, TMinIopsBlockStrategy(), AccelerationParams); } else if (Info->Type.ErasureFamily() == TErasureType::ErasureMirror) { - return Blackboard.RunStrategy(logCtx, TMinIopsMirrorStrategy()); + return Blackboard.RunStrategy(logCtx, TMinIopsMirrorStrategy(), AccelerationParams); } else { return RunBoldStrategy(logCtx); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.h b/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.h index 2b7623c63f93..53d7433651b1 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_get_impl.h @@ -49,9 +49,12 @@ class TGetImpl { std::unordered_map> BlobFlags; // keep, doNotKeep per blob + TAccelerationParams AccelerationParams; + public: TGetImpl(const TIntrusivePtr &info, const TIntrusivePtr &groupQueues, - TEvBlobStorage::TEvGet *ev, TNodeLayoutInfoPtr&& nodeLayout, const TString& requestPrefix = {}) + TEvBlobStorage::TEvGet *ev, TNodeLayoutInfoPtr&& nodeLayout, + const TAccelerationParams& accelerationParams, const TString& requestPrefix = {}) : Deadline(ev->Deadline) , Info(info) , Queries(ev->Queries.Release()) @@ -68,6 +71,7 @@ class TGetImpl { , PhantomCheck(ev->PhantomCheck) , Decommission(ev->Decommission) , ReaderTabletData(ev->ReaderTabletData) + , AccelerationParams(accelerationParams) { Y_ABORT_UNLESS(QuerySize > 0); } @@ -275,8 +279,8 @@ class TGetImpl { AccelerateGet(logCtx, slowDisksMask, outVGets, outVPuts); } - ui64 GetTimeToAccelerateGetNs(TLogContext &logCtx, ui32 acceleratesSent); - ui64 GetTimeToAcceleratePutNs(TLogContext &logCtx, ui32 acceleratesSent); + ui64 GetTimeToAccelerateGetNs(TLogContext &logCtx); + ui64 GetTimeToAcceleratePutNs(TLogContext &logCtx); TString DumpFullState() const; @@ -313,7 +317,7 @@ class TGetImpl { void PrepareVPuts(TLogContext &logCtx, TDeque> &outVPuts); - ui64 GetTimeToAccelerateNs(TLogContext &logCtx, NKikimrBlobStorage::EVDiskQueueId queueId, ui32 nthWorst); + ui64 GetTimeToAccelerateNs(TLogContext &logCtx, NKikimrBlobStorage::EVDiskQueueId queueId); }; //TGetImpl }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp index 0dbf35619f6b..da777823942c 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp @@ -5,8 +5,8 @@ namespace NKikimr { std::atomic TBlobStorageGroupProxy::ThrottlingTimestamp; 
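The repeated edits above in dsproxy_get.cpp and below in dsproxy_put.cpp implement one idea: predict per-disk delays, scale them by PredictedDelayMultiplier, keep a small sorted prefix of the slowest disks, and schedule the accelerate event at StartTime plus the predicted delay instead of re-deriving it from elapsed microseconds. Below is a compilable sketch of that computation, with std::chrono standing in for TDuration/TMonotonic and the prefix handling slightly simplified; it is an illustration under those assumptions, not the exact YDB code.

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <vector>

struct TDiskDelay {
    uint64_t PredictedNs = 0;   // predicted response delay for one disk
    uint32_t DiskIdx = 0;
};

// Scale every predicted delay by the configurable multiplier and return the delay of
// the nth-slowest disk (index 0 is the slowest). Assumes at least one disk.
uint64_t TimeToAccelerateNs(const std::vector<uint64_t>& predictedNs, double multiplier, uint32_t nth) {
    std::vector<TDiskDelay> delays(predictedNs.size());
    for (uint32_t i = 0; i < predictedNs.size(); ++i) {
        delays[i] = {static_cast<uint64_t>(predictedNs[i] * multiplier), i};
    }
    const uint32_t prefix = std::min<uint32_t>(nth + 1, delays.size());
    std::partial_sort(delays.begin(), delays.begin() + prefix, delays.end(),
                      [](const TDiskDelay& a, const TDiskDelay& b) { return a.PredictedNs > b.PredictedNs; });
    return delays[std::min<uint32_t>(nth, delays.size() - 1)].PredictedNs;
}

// Acceleration is then scheduled relative to the request's start time, mirroring the
// "Schedule(nextAcceleration - now, ...)" calls above: a zero result means "accelerate now".
std::chrono::nanoseconds DelayUntilAcceleration(std::chrono::steady_clock::time_point start,
                                                std::chrono::steady_clock::time_point now,
                                                uint64_t timeToAccelerateNs) {
    const auto next = start + std::chrono::nanoseconds(timeToAccelerateNs);
    if (next <= now) {
        return std::chrono::nanoseconds(0);    // deadline already passed: accelerate immediately
    }
    return std::chrono::duration_cast<std::chrono::nanoseconds>(next - now);
}

Put requests additionally double the computed delay when only one request is still pending after an unsuccessful first accelerate (the RequestsPendingBeforeAcceleration counter in dsproxy_put.cpp below).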
TBlobStorageGroupProxy::TBlobStorageGroupProxy(TIntrusivePtr&& info, bool forceWaitAllDrives, - TIntrusivePtr &nodeMon, TIntrusivePtr&& storagePoolCounters, - const TControlWrapper &enablePutBatching, const TControlWrapper &enableVPatch) + TIntrusivePtr &nodeMon, TIntrusivePtr&& storagePoolCounters, + const TBlobStorageProxyParameters& params) : GroupId(info->GroupID) , Info(std::move(info)) , Topology(Info->PickTopology()) @@ -14,36 +14,48 @@ namespace NKikimr { , StoragePoolCounters(std::move(storagePoolCounters)) , IsEjected(false) , ForceWaitAllDrives(forceWaitAllDrives) - , EnablePutBatching(enablePutBatching) - , EnableVPatch(enableVPatch) + , UseActorSystemTimeInBSQueue(params.UseActorSystemTimeInBSQueue) + , EnablePutBatching(params.EnablePutBatching) + , EnableVPatch(params.EnableVPatch) + , SlowDiskThreshold(params.SlowDiskThreshold) + , PredictedDelayMultiplier(params.PredictedDelayMultiplier) {} - TBlobStorageGroupProxy::TBlobStorageGroupProxy(ui32 groupId, bool isEjected, TIntrusivePtr &nodeMon, - const TControlWrapper &enablePutBatching, const TControlWrapper &enableVPatch) + TBlobStorageGroupProxy::TBlobStorageGroupProxy(ui32 groupId, bool isEjected,TIntrusivePtr &nodeMon, + const TBlobStorageProxyParameters& params) : GroupId(TGroupId::FromValue(groupId)) , NodeMon(nodeMon) , IsEjected(isEjected) , ForceWaitAllDrives(false) - , EnablePutBatching(enablePutBatching) - , EnableVPatch(enableVPatch) + , UseActorSystemTimeInBSQueue(params.UseActorSystemTimeInBSQueue) + , EnablePutBatching(params.EnablePutBatching) + , EnableVPatch(params.EnableVPatch) + , SlowDiskThreshold(params.SlowDiskThreshold) + , PredictedDelayMultiplier(params.PredictedDelayMultiplier) {} IActor* CreateBlobStorageGroupEjectedProxy(ui32 groupId, TIntrusivePtr &nodeMon) { - return new TBlobStorageGroupProxy(groupId, true, nodeMon, TControlWrapper(false, false, true), - TControlWrapper(false, false, true)); + return new TBlobStorageGroupProxy(groupId, true, nodeMon, + TBlobStorageProxyParameters{ + .EnablePutBatching = TControlWrapper(false, false, true), + .EnableVPatch = TControlWrapper(false, false, true), + .SlowDiskThreshold = TControlWrapper(2000, 1, 1000000), + .PredictedDelayMultiplier = TControlWrapper(1000, 1, 1000000), + } + ); } IActor* CreateBlobStorageGroupProxyConfigured(TIntrusivePtr&& info, bool forceWaitAllDrives, TIntrusivePtr &nodeMon, TIntrusivePtr&& storagePoolCounters, - const TControlWrapper &enablePutBatching, const TControlWrapper &enableVPatch) { + const TBlobStorageProxyParameters& params) { Y_ABORT_UNLESS(info); - return new TBlobStorageGroupProxy(std::move(info), forceWaitAllDrives, nodeMon, std::move(storagePoolCounters), - enablePutBatching, enableVPatch); + return new TBlobStorageGroupProxy(std::move(info), forceWaitAllDrives, nodeMon, + std::move(storagePoolCounters), params); } IActor* CreateBlobStorageGroupProxyUnconfigured(ui32 groupId, TIntrusivePtr &nodeMon, - const TControlWrapper &enablePutBatching, const TControlWrapper &enableVPatch) { - return new TBlobStorageGroupProxy(groupId, false, nodeMon, enablePutBatching, enableVPatch); + const TBlobStorageProxyParameters& params) { + return new TBlobStorageGroupProxy(groupId, false, nodeMon, params); } NActors::NLog::EPriority PriorityForStatusOutbound(NKikimrProto::EReplyStatus status) { diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_impl.h b/ydb/core/blobstorage/dsproxy/dsproxy_impl.h index dd61054fffae..1a282bfeb2ff 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_impl.h +++ 
b/ydb/core/blobstorage/dsproxy/dsproxy_impl.h @@ -66,6 +66,7 @@ class TBlobStorageGroupProxy : public TActorBootstrapped ui64 UnconfiguredBufferSize = 0; const bool IsEjected; bool ForceWaitAllDrives; + bool UseActorSystemTimeInBSQueue; bool IsLimitedKeyless = false; bool IsFullMonitoring = false; // current state of monitoring ui32 MinREALHugeBlobInBytes = 0; @@ -118,6 +119,11 @@ class TBlobStorageGroupProxy : public TActorBootstrapped bool HasInvalidGroupId() const { return GroupId.GetRawId() == Max(); } void ProcessInitQueue(); + TMemorizableControlWrapper SlowDiskThreshold; + TMemorizableControlWrapper PredictedDelayMultiplier; + + TAccelerationParams GetAccelerationParams(); + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Enable monitoring @@ -264,6 +270,7 @@ class TBlobStorageGroupProxy : public TActorBootstrapped void HandleNormal(TEvBlobStorage::TEvAssimilate::TPtr &ev); void Handle(TEvBlobStorage::TEvBunchOfEvents::TPtr ev); void Handle(TEvDeathNote::TPtr ev); + void Handle(TEvGetQueuesInfo::TPtr ev); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Error state @@ -313,10 +320,10 @@ class TBlobStorageGroupProxy : public TActorBootstrapped TBlobStorageGroupProxy(TIntrusivePtr&& info, bool forceWaitAllDrives, TIntrusivePtr &nodeMon, TIntrusivePtr&& storagePoolCounters, - const TControlWrapper &enablePutBatching, const TControlWrapper &enableVPatch); + const TBlobStorageProxyParameters& params); TBlobStorageGroupProxy(ui32 groupId, bool isEjected, TIntrusivePtr &nodeMon, - const TControlWrapper &enablePutBatching, const TControlWrapper &enableVPatch); + const TBlobStorageProxyParameters& params); void Bootstrap(); @@ -360,6 +367,7 @@ class TBlobStorageGroupProxy : public TActorBootstrapped IgnoreFunc(TEvEstablishingSessionTimeout); fFunc(Ev5min, Handle5min); cFunc(EvCheckDeadlines, CheckDeadlines); + hFunc(TEvGetQueuesInfo, Handle); ) #define HANDLE_EVENTS(HANDLER) \ diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp index db036df7a0f1..fc9f65bce668 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp @@ -47,6 +47,7 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor 0); + RequestsPendingBeforeAcceleration--; + } const ui64 cyclesPerUs = NHPTimer::GetCyclesPerSecond() / 1000000; ev->Get()->Record.MutableTimestamps()->SetReceivedByDSProxyUs(GetCycleCountFast() / cyclesPerUs); @@ -282,6 +291,10 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor 0); + RequestsPendingBeforeAcceleration--; + } const ui64 cyclesPerUs = NHPTimer::GetCyclesPerSecond() / 1000000; ev->Get()->Record.MutableTimestamps()->SetReceivedByDSProxyUs(GetCycleCountFast() / cyclesPerUs); @@ -371,12 +384,17 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor 0 && WaitingVDiskCount <= 2 && RequestsSent > 1) { - ui64 timeToAccelerateUs = Max(1, PutImpl.GetTimeToAccelerateNs(LogCtx, 2 - AccelerateRequestsSent) / 1000); - TDuration timeSinceStart = TActivationContext::Monotonic() - StartTime; - if (timeSinceStart.MicroSeconds() < timeToAccelerateUs) { + ui64 timeToAccelerateUs = Max(1, PutImpl.GetTimeToAccelerateNs(LogCtx) / 1000); + if (RequestsPendingBeforeAcceleration == 1 && AccelerateRequestsSent == 1) { + // if there is only one request pending, but first accelerate is unsuccessful, make a pause + 
timeToAccelerateUs *= 2; + } + TDuration timeToAccelerate = TDuration::MicroSeconds(timeToAccelerateUs); + TMonotonic now = TActivationContext::Monotonic(); + TMonotonic nextAcceleration = StartTime + timeToAccelerate; + if (nextAcceleration > now) { ui64 causeIdx = RootCauseTrack.RegisterAccelerate(); - Schedule(TDuration::MicroSeconds(timeToAccelerateUs - timeSinceStart.MicroSeconds()), - new TEvAccelerate(causeIdx)); + Schedule(nextAcceleration - now, new TEvAccelerate(causeIdx)); IsAccelerateScheduled = true; } else { Accelerate(); @@ -503,13 +521,14 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActorGetTotalVDisksNum()) , HandleClass(params.Common.Event->HandleClass) , ReportedBytes(0) , TimeStatsEnabled(params.TimeStatsEnabled) , Tactic(params.Common.Event->Tactic) , Stats(std::move(params.Stats)) + , AccelerationParams(params.AccelerationParams) , IsMultiPutMode(false) , IncarnationRecords(Info->GetTotalVDisksNum()) , ExpiredVDiskSet(&Info->GetTopology()) @@ -527,7 +546,7 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActorGetTotalVDisksNum()) , IsManyPuts(true) , HandleClass(params.HandleClass) @@ -535,6 +554,7 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActorGetTotalVDisksNum()) , ExpiredVDiskSet(&Info->GetTopology()) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp index 710fbdf4cfb6..d3ce08c2feef 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp @@ -37,7 +37,7 @@ void TPutImpl::RunStrategy(TLogContext &logCtx, const IStrategy& strategy, TPutR const TBlobStorageGroupInfo::TGroupVDisks& expired) { Y_VERIFY_S(Blackboard.BlobStates.size(), "State# " << DumpFullState()); TBatchedVec finished; - const EStrategyOutcome outcome = Blackboard.RunStrategy(logCtx, strategy, &finished, &expired); + const EStrategyOutcome outcome = Blackboard.RunStrategy(logCtx, strategy, AccelerationParams, &finished, &expired); for (const TBlackboard::TFinishedBlob& item : finished) { Y_ABORT_UNLESS(item.BlobIdx < Blobs.size()); Y_ABORT_UNLESS(!IsDone[item.BlobIdx]); @@ -82,7 +82,7 @@ void TPutImpl::PrepareReply(NKikimrProto::EReplyStatus status, TLogContext &logC } } -ui64 TPutImpl::GetTimeToAccelerateNs(TLogContext &logCtx, ui32 nthWorst) { +ui64 TPutImpl::GetTimeToAccelerateNs(TLogContext &logCtx) { Y_UNUSED(logCtx); Y_ABORT_UNLESS(!Blackboard.BlobStates.empty()); TBatchedVec nthWorstPredictedNsVec(Blackboard.BlobStates.size()); @@ -90,9 +90,9 @@ ui64 TPutImpl::GetTimeToAccelerateNs(TLogContext &logCtx, ui32 nthWorst) { for (auto &[_, state] : Blackboard.BlobStates) { // Find the n'th slowest disk TDiskDelayPredictions worstDisks; - state.GetWorstPredictedDelaysNs(*Info, *Blackboard.GroupQueues, HandleClassToQueueId(Blackboard.PutHandleClass), nthWorst, - &worstDisks); - nthWorstPredictedNsVec[idx++] = worstDisks[nthWorst].PredictedNs; + state.GetWorstPredictedDelaysNs(*Info, *Blackboard.GroupQueues, HandleClassToQueueId(Blackboard.PutHandleClass), + &worstDisks, AccelerationParams.PredictedDelayMultiplier); + nthWorstPredictedNsVec[idx++] = worstDisks[2].PredictedNs; } return *MaxElement(nthWorstPredictedNsVec.begin(), nthWorstPredictedNsVec.end()); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.h b/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.h index 48300adfd2e2..c292cf44368c 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.h +++ 
b/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.h @@ -44,6 +44,8 @@ class TPutImpl { const TEvBlobStorage::TEvPut::ETactic Tactic; + const TAccelerationParams AccelerationParams; + struct TBlobInfo { TLogoBlobID BlobId; TRope Buffer; @@ -103,7 +105,8 @@ class TPutImpl { public: TPutImpl(const TIntrusivePtr &info, const TIntrusivePtr &state, TEvBlobStorage::TEvPut *ev, const TIntrusivePtr &mon, - bool enableRequestMod3x3ForMinLatecy, TActorId recipient, ui64 cookie, NWilson::TTraceId traceId) + bool enableRequestMod3x3ForMinLatecy, TActorId recipient, ui64 cookie, NWilson::TTraceId traceId, + const TAccelerationParams& accelerationParams) : Info(info) , Blackboard(info, state, ev->HandleClass, NKikimrBlobStorage::EGetHandleClass::AsyncRead) , IsDone(1) @@ -113,6 +116,7 @@ class TPutImpl { , Mon(mon) , EnableRequestMod3x3ForMinLatecy(enableRequestMod3x3ForMinLatecy) , Tactic(ev->Tactic) + , AccelerationParams(accelerationParams) { BlobMap.emplace(ev->Id, Blobs.size()); Blobs.emplace_back(ev->Id, TRope(ev->Buffer), recipient, cookie, std::move(traceId), std::move(ev->Orbit), @@ -126,7 +130,7 @@ class TPutImpl { TPutImpl(const TIntrusivePtr &info, const TIntrusivePtr &state, TBatchedVec &events, const TIntrusivePtr &mon, NKikimrBlobStorage::EPutHandleClass putHandleClass, TEvBlobStorage::TEvPut::ETactic tactic, - bool enableRequestMod3x3ForMinLatecy) + bool enableRequestMod3x3ForMinLatecy, const TAccelerationParams& accelerationParams) : Info(info) , Blackboard(info, state, putHandleClass, NKikimrBlobStorage::EGetHandleClass::AsyncRead) , IsDone(events.size()) @@ -136,6 +140,7 @@ class TPutImpl { , Mon(mon) , EnableRequestMod3x3ForMinLatecy(enableRequestMod3x3ForMinLatecy) , Tactic(tactic) + , AccelerationParams(accelerationParams) { Y_ABORT_UNLESS(events.size(), "TEvPut vector is empty"); @@ -186,7 +191,7 @@ class TPutImpl { void PrepareOneReply(NKikimrProto::EReplyStatus status, size_t blobIdx, TLogContext &logCtx, TString errorReason, TPutResultVec &outPutResults); - ui64 GetTimeToAccelerateNs(TLogContext &logCtx, ui32 nthWorst); + ui64 GetTimeToAccelerateNs(TLogContext &logCtx); TString DumpFullState() const; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp index fe85b916350c..b3691e0f8e7b 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp @@ -110,7 +110,8 @@ namespace NKikimr { .LogAccEnabled = ev->Get()->IsVerboseNoDataEnabled || ev->Get()->CollectDebugInfo, .LatencyQueueKind = kind, }, - .NodeLayout = TNodeLayoutInfoPtr(NodeLayoutInfo) + .NodeLayout = TNodeLayoutInfoPtr(NodeLayoutInfo), + .AccelerationParams = GetAccelerationParams(), }), ev->Get()->Deadline ); @@ -223,6 +224,7 @@ namespace NKikimr { .TimeStatsEnabled = Mon->TimeStats.IsEnabled(), .Stats = PerDiskStats, .EnableRequestMod3x3ForMinLatency = enableRequestMod3x3ForMinLatency, + .AccelerationParams = GetAccelerationParams(), }), ev->Get()->Deadline ); @@ -496,6 +498,7 @@ namespace NKikimr { .TimeStatsEnabled = Mon->TimeStats.IsEnabled(), .Stats = PerDiskStats, .EnableRequestMod3x3ForMinLatency = enableRequestMod3x3ForMinLatency, + .AccelerationParams = GetAccelerationParams(), }), ev->Get()->Deadline ); @@ -517,6 +520,7 @@ namespace NKikimr { .HandleClass = handleClass, .Tactic = tactic, .EnableRequestMod3x3ForMinLatency = enableRequestMod3x3ForMinLatency, + .AccelerationParams = GetAccelerationParams(), }), TInstant::Max() ); @@ -560,4 +564,18 @@ namespace NKikimr { BatchedGetRequestCount = 0; } 
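The handler added below and GetAccelerationParams() tie the new ICB controls into this machinery: the controls are plain integers holding the value scaled by 1000 (2000 and 1000 in the ejected-proxy defaults above, matching DefaultSlowDiskThreshold = 2 and DefaultPredictedDelayMultiplier = 1), and the proxy converts them back with the .001f factor on each request. A small sketch of that conversion and of the slow-disk test it feeds; TControlSketch is a hypothetical stand-in for the real control wrapper, used only to keep the example self-contained.

#include <cstdint>
#include <utility>
#include <vector>

struct TControlSketch {                        // stand-in for an ICB integer control
    int64_t Value = 0;
    int64_t Update() const { return Value; }   // the real wrapper also refreshes/caches the value
};

struct TAccelerationParamsSketch {
    float SlowDiskThreshold = 2.0f;            // how much slower than the next disk counts as slow
    float PredictedDelayMultiplier = 1.0f;     // global scale applied to predicted per-disk delays
};

// Per-mille integers become floats: 2000 -> 2.0, 1000 -> 1.0.
TAccelerationParamsSketch MakeAccelerationParams(const TControlSketch& slowDiskThreshold,
                                                 const TControlSketch& predictedDelayMultiplier) {
    return {
        .SlowDiskThreshold = 0.001f * slowDiskThreshold.Update(),
        .PredictedDelayMultiplier = 0.001f * predictedDelayMultiplier.Update(),
    };
}

// A disk counts as exceptionally slow when its predicted delay exceeds the second-worst
// delay times SlowDiskThreshold -- the comparison used by MakeSlowSubgroupDiskMask and the
// mirror-3dc get strategy. Input is expected sorted in descending order of predicted delay.
uint32_t SlowDiskMask(const std::vector<std::pair<uint64_t, uint32_t>>& worstFirst,  // {delayNs, diskIdx}
                      const TAccelerationParamsSketch& params) {
    if (worstFirst.size() < 2 || worstFirst[1].first == 0) {
        return 0;                              // with a single disk nothing is "relatively slow"
    }
    if (worstFirst[0].first > worstFirst[1].first * params.SlowDiskThreshold) {
        return 1u << worstFirst[0].second;
    }
    return 0;
}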
+ void TBlobStorageGroupProxy::Handle(TEvGetQueuesInfo::TPtr ev) { + ui32 groupSize = Info->GetTotalVDisksNum(); + std::unique_ptr res = std::make_unique(groupSize); + if (Sessions && Sessions->GroupQueues) { + for (ui32 orderNum = 0; orderNum < groupSize; ++orderNum) { + TGroupQueues::TVDisk* vdisk = Sessions->GroupQueues->DisksByOrderNumber[orderNum]; + if (vdisk) { + const TGroupQueues::TVDisk::TQueues::TQueue& queue = vdisk->Queues.GetQueue(ev->Get()->QueueId); + res->AddInfoForQueue(orderNum, queue.ActorId, queue.FlowRecord); + } + } + } + TActivationContext::Send(ev->Sender, std::move(res)); + } } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp index 079ce5e2d5df..4b0b10450dfc 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp @@ -150,7 +150,8 @@ namespace NKikimr { } } else { // this is the first time configuration arrives -- no queues are created yet EnsureMonitoring(false); - Sessions = MakeIntrusive(Info, BSProxyCtx, MonActor, SelfId()); + Sessions = MakeIntrusive(Info, BSProxyCtx, MonActor, SelfId(), + UseActorSystemTimeInBSQueue); NumUnconnectedDisks = Sessions->GetNumUnconnectedDisks(); NodeMon->IncNumUnconnected(NumUnconnectedDisks); } @@ -322,4 +323,11 @@ namespace NKikimr { Send(ev->Sender, new TEvProxySessionsState(Sessions ? Sessions->GroupQueues : nullptr)); } + TAccelerationParams TBlobStorageGroupProxy::GetAccelerationParams() { + return TAccelerationParams{ + .SlowDiskThreshold = .001f * SlowDiskThreshold.Update(TActivationContext::Now()), + .PredictedDelayMultiplier = .001f * PredictedDelayMultiplier.Update(TActivationContext::Now()), + }; + } + } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put.h index 3b40f5374a4c..4256081130b3 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put.h @@ -11,7 +11,9 @@ namespace NKikimr { class TAcceleratePutStrategy : public TStrategyBase { public: EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests) override { + TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests, + const TAccelerationParams& accelerationParams) override { + Y_UNUSED(accelerationParams); // Find the unput part and disk TStackVec badDiskIdxs; for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put_m3dc.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put_m3dc.h index 03ba79f7b0db..69804e441002 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put_m3dc.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put_m3dc.h @@ -30,25 +30,25 @@ class TAcceleratePut3dcStrategy : public TStrategyBase { } EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests) override { - // Find the unput part and disk - i32 badDiskIdx = -1; + TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests, + const TAccelerationParams& accelerationParams) override { + Y_UNUSED(accelerationParams); + // Find the unput parts and disks + ui32 badDiskMask = 0; for (size_t diskIdx = 0; diskIdx < 
state.Disks.size(); ++diskIdx) { TBlobState::TDisk &disk = state.Disks[diskIdx]; for (size_t partIdx = 0; partIdx < disk.DiskParts.size(); ++partIdx) { TBlobState::TDiskPart &diskPart = disk.DiskParts[partIdx]; if (diskPart.Situation == TBlobState::ESituation::Sent) { - badDiskIdx = diskIdx; + badDiskMask |= (1 << diskIdx); } } } - - if (badDiskIdx >= 0) { + if (badDiskMask > 0) { // Mark the 'bad' disk as the single slow disk for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { - state.Disks[diskIdx].IsSlow = false; + state.Disks[diskIdx].IsSlow = badDiskMask & (1 << diskIdx); } - state.Disks[badDiskIdx].IsSlow = true; // Prepare part placement if possible TBlobStorageGroupType::TPartPlacement partPlacement; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp index 9243f693990c..690765f9d48f 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp @@ -389,7 +389,7 @@ void TStrategyBase::Prepare3dcPartPlacement(const TBlobState &state, } ui32 TStrategyBase::MakeSlowSubgroupDiskMask(TBlobState &state, const TBlobStorageGroupInfo &info, TBlackboard &blackboard, - bool isPut) { + bool isPut, const TAccelerationParams& accelerationParams) { if (info.GetTotalVDisksNum() == 1) { // when there is only one disk, we consider it not slow return 0; @@ -400,12 +400,13 @@ ui32 TStrategyBase::MakeSlowSubgroupDiskMask(TBlobState &state, const TBlobStora TDiskDelayPredictions worstDisks; state.GetWorstPredictedDelaysNs(info, *blackboard.GroupQueues, (isPut ? HandleClassToQueueId(blackboard.PutHandleClass) : - HandleClassToQueueId(blackboard.GetHandleClass)), 1, - &worstDisks); + HandleClassToQueueId(blackboard.GetHandleClass)), + &worstDisks, accelerationParams.PredictedDelayMultiplier); // Check if the slowest disk exceptionally slow, or just not very fast ui32 slowDiskSubgroupMask = 0; - if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > worstDisks[1].PredictedNs * 2) { + if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > worstDisks[1].PredictedNs * + accelerationParams.SlowDiskThreshold) { slowDiskSubgroupMask = 1 << worstDisks[0].DiskIdx; } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.h index 67da2e3fef3e..ce03192fc93d 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.h @@ -51,8 +51,9 @@ class TStrategyBase : public IStrategy { ui8 preferredReplicasPerRealm, bool considerSlowAsError, TBlobStorageGroupType::TPartPlacement &outPartPlacement); // Sets IsSlow for the slow disk, resets for other disks. - // Returns -1 if there is no slow disk, or subgroupIdx of the slow disk. 
- ui32 MakeSlowSubgroupDiskMask(TBlobState &state, const TBlobStorageGroupInfo &info, TBlackboard &blackboard, bool isPut); + // returns bit mask with 1 on positions of slow disks + ui32 MakeSlowSubgroupDiskMask(TBlobState &state, const TBlobStorageGroupInfo &info, TBlackboard &blackboard, bool isPut, + const TAccelerationParams& accelerationParams); }; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_bold.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_bold.h index 5788df96cd0a..134df866a233 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_bold.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_bold.h @@ -13,7 +13,9 @@ class TBoldStrategy : public TStrategyBase { }; EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests) override { + TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests, + const TAccelerationParams& accelerationParams) override { + Y_UNUSED(accelerationParams); // Look at the current layout and set the status if possible const ui32 totalPartCount = info.Type.TotalPartCount(); bool doLook = true; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_basic.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_basic.h index 1e4c9acfb584..a5782426c66d 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_basic.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_basic.h @@ -55,7 +55,8 @@ namespace NKikimr { } EStrategyOutcome Process(TLogContext& logCtx, TBlobState& state, const TBlobStorageGroupInfo& info, - TBlackboard &blackboard, TGroupDiskRequests& groupDiskRequests) override { + TBlackboard &blackboard, TGroupDiskRequests& groupDiskRequests, + const TAccelerationParams& accelerationParams) override { if (state.WholeSituation == TBlobState::ESituation::Present) { return EStrategyOutcome::DONE; } @@ -76,13 +77,14 @@ namespace NKikimr { case TBlackboard::AccelerationModeSkipOneSlowest: { TDiskDelayPredictions worstDisks; state.GetWorstPredictedDelaysNs(info, *blackboard.GroupQueues, - HandleClassToQueueId(blackboard.GetHandleClass), 1, - &worstDisks); + HandleClassToQueueId(blackboard.GetHandleClass), + &worstDisks, accelerationParams.PredictedDelayMultiplier); // Check if the slowest disk exceptionally slow, or just not very fast i32 slowDiskSubgroupIdx = -1; - if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > worstDisks[1].PredictedNs * 2) { - slowDiskSubgroupIdx = worstDisks[1].DiskIdx; + if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > + worstDisks[1].PredictedNs * accelerationParams.SlowDiskThreshold) { + slowDiskSubgroupIdx = worstDisks[0].DiskIdx; } // Mark single slow disk diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_restore.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_restore.h index 84c11bc47474..9ed0256a41ed 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_restore.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_restore.h @@ -14,7 +14,8 @@ namespace NKikimr { public: EStrategyOutcome Process(TLogContext& logCtx, TBlobState& state, const TBlobStorageGroupInfo& info, - TBlackboard& blackboard, TGroupDiskRequests& groupDiskRequests) override { + TBlackboard& blackboard, TGroupDiskRequests& groupDiskRequests, + const TAccelerationParams& accelerationParams) override { if (state.WholeSituation == TBlobState::ESituation::Present) { return EStrategyOutcome::DONE; 
} @@ -55,7 +56,7 @@ namespace NKikimr { state.Id.ToString().c_str(), ui32(state.WholeSituation)); state.WholeSituation = TBlobState::ESituation::Present; const EStrategyOutcome outcome = TPut3dcStrategy(TEvBlobStorage::TEvPut::TacticMaxThroughput, false).Process(logCtx, - state, info, blackboard, groupDiskRequests); + state, info, blackboard, groupDiskRequests, accelerationParams); switch (outcome) { case EStrategyOutcome::IN_PROGRESS: state.WholeSituation = TBlobState::ESituation::Unknown; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3of4.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3of4.h index aec4efb8592a..4bb43f816ef9 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3of4.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3of4.h @@ -10,7 +10,9 @@ namespace NKikimr { class TMirror3of4GetStrategy : public TMirror3of4StrategyBase { public: EStrategyOutcome Process(TLogContext& /*logCtx*/, TBlobState& state, const TBlobStorageGroupInfo& info, - TBlackboard& /*blackboard*/, TGroupDiskRequests& groupDiskRequests) override { + TBlackboard& /*blackboard*/, TGroupDiskRequests& groupDiskRequests, + const TAccelerationParams& accelerationParams) override { + Y_UNUSED(accelerationParams); if (!CheckFailModel(state, info)) { state.WholeSituation = TBlobState::ESituation::Error; return EStrategyOutcome::Error("failure model exceeded"); diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_block.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_block.h index 5829bd7f35b6..2d0efd8f8615 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_block.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_block.h @@ -288,7 +288,8 @@ class TMinIopsBlockStrategy : public TStrategyBase { } EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard& blackboard, TGroupDiskRequests &groupDiskRequests) override { + TBlackboard& blackboard, TGroupDiskRequests &groupDiskRequests, + const TAccelerationParams& accelerationParams) override { if (auto res = RestoreWholeFromDataParts(logCtx, state, info)) { return *res; } else if (auto res = RestoreWholeWithErasure(logCtx, state, info)) { @@ -311,7 +312,8 @@ class TMinIopsBlockStrategy : public TStrategyBase { // Try excluding the slow disk bool isDone = false; // TODO: Mark disk that does not answer when accelerating requests - ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, info, blackboard, false); + ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, info, blackboard, false, + accelerationParams); if (slowDiskSubgroupMask >= 0) { TBlobStorageGroupInfo::EBlobState fastPessimisticState = TBlobStorageGroupInfo::EBS_DISINTEGRATED; TBlobStorageGroupInfo::EBlobState fastOptimisticState = TBlobStorageGroupInfo::EBS_DISINTEGRATED; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_mirror.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_mirror.h index 38a7f0c9e78a..8da71d39ee9f 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_mirror.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_mirror.h @@ -18,7 +18,9 @@ class TMinIopsMirrorStrategy : public TStrategyBase { } EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests) override { + TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests, + const 
TAccelerationParams& accelerationParams) override { + Y_UNUSED(accelerationParams); if (auto res = RestoreWholeFromDataParts(logCtx, state, info)) { return *res; } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3dc.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3dc.h index ed07074072fe..e6e9a376f720 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3dc.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3dc.h @@ -31,11 +31,12 @@ class TPut3dcStrategy : public TStrategyBase { } EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard& blackboard, TGroupDiskRequests &groupDiskRequests) override { + TBlackboard& blackboard, TGroupDiskRequests &groupDiskRequests, + const TAccelerationParams& accelerationParams) override { TBlobStorageGroupType::TPartPlacement partPlacement; bool degraded = false; bool isDone = false; - ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, info, blackboard, true); + ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, info, blackboard, true, accelerationParams); do { if (slowDiskSubgroupMask == 0) { break; // ignore this case diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3of4.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3of4.h index 3a3bfbefcd6b..457ae37f6a62 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3of4.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3of4.h @@ -15,7 +15,9 @@ class TPut3of4Strategy : public TMirror3of4StrategyBase { {} EStrategyOutcome Process(TLogContext& /*logCtx*/, TBlobState& state, const TBlobStorageGroupInfo& info, - TBlackboard& /*blackboard*/, TGroupDiskRequests& groupDiskRequests) override { + TBlackboard& /*blackboard*/, TGroupDiskRequests& groupDiskRequests, + const TAccelerationParams& accelerationParams) override { + Y_UNUSED(accelerationParams); if (!CheckFailModel(state, info)) { state.WholeSituation = TBlobState::ESituation::Error; return EStrategyOutcome::Error("failure model exceeded"); diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_restore.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_restore.h index 5be8e482a663..4a2026c9a86e 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_restore.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_restore.h @@ -80,7 +80,8 @@ class TRestoreStrategy : public TStrategyBase { } EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard &blackboard, TGroupDiskRequests &groupDiskRequests) override { + TBlackboard &blackboard, TGroupDiskRequests &groupDiskRequests, + const TAccelerationParams& accelerationParams) override { // Check if the work is already done. 
if (state.WholeSituation == TBlobState::ESituation::Absent) { return EStrategyOutcome::DONE; // nothing to restore @@ -130,11 +131,12 @@ class TRestoreStrategy : public TStrategyBase { // Find the slowest disk, if there are more than 1 TDiskDelayPredictions worstDisks; state.GetWorstPredictedDelaysNs(info, *blackboard.GroupQueues, - HandleClassToQueueId(blackboard.PutHandleClass), 1, - &worstDisks); + HandleClassToQueueId(blackboard.PutHandleClass), + &worstDisks, accelerationParams.PredictedDelayMultiplier); // Check if the slowest disk exceptionally slow, or just not very fast - if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > worstDisks[1].PredictedNs * 2) { + if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > worstDisks[1].PredictedNs * + accelerationParams.SlowDiskThreshold) { slowDiskSubgroupIdxs.push_back(worstDisks[0].DiskIdx); } } diff --git a/ydb/core/blobstorage/dsproxy/group_sessions.cpp b/ydb/core/blobstorage/dsproxy/group_sessions.cpp index 2623218cadd3..8569a94ad948 100644 --- a/ydb/core/blobstorage/dsproxy/group_sessions.cpp +++ b/ydb/core/blobstorage/dsproxy/group_sessions.cpp @@ -28,7 +28,7 @@ TString QueueIdName(NKikimrBlobStorage::EVDiskQueueId queueId) { } TGroupSessions::TGroupSessions(const TIntrusivePtr& info, const TBSProxyContextPtr& bspctx, - const TActorId& monActor, const TActorId& proxyActor) + const TActorId& monActor, const TActorId& proxyActor, bool useActorSystemTimeInBSQueue) : GroupQueues(MakeIntrusive(info->GetTopology())) , ConnectedQueuesMask(info->GetTotalVDisksNum(), 0) , MonActor(monActor) @@ -71,7 +71,7 @@ TGroupSessions::TGroupSessions(const TIntrusivePtr& info, std::unique_ptr queueActor(CreateVDiskBackpressureClient(info, vd, queueId, counters, bspctx, NBackpressure::TQueueClientId(NBackpressure::EQueueClientType::DSProxy, nodeId), QueueIdName(queueId), interconnectChannel, nodeId == targetNodeId, TDuration::Minutes(1), flowRecord, - NMonitoring::TCountableBase::EVisibility::Public)); + NMonitoring::TCountableBase::EVisibility::Public, useActorSystemTimeInBSQueue)); TActorId queue = TActivationContext::Register(queueActor.release(), ProxyActor, TMailboxType::ReadAsFilled, AppData()->SystemPoolId); diff --git a/ydb/core/blobstorage/dsproxy/group_sessions.h b/ydb/core/blobstorage/dsproxy/group_sessions.h index 1cd731698304..16d9b804421d 100644 --- a/ydb/core/blobstorage/dsproxy/group_sessions.h +++ b/ydb/core/blobstorage/dsproxy/group_sessions.h @@ -242,7 +242,7 @@ namespace NKikimr { TActorId ProxyActor; TGroupSessions(const TIntrusivePtr& info, const TBSProxyContextPtr& bspctx, - const TActorId& monActor, const TActorId& proxyActor); + const TActorId& monActor, const TActorId& proxyActor, bool useActorSystemTimeInBSQueue); void Poison(); bool GoodToGo(const TBlobStorageGroupInfo::TTopology& topology, bool waitForAllVDisks); void QueueConnectUpdate(ui32 orderNumber, NKikimrBlobStorage::EVDiskQueueId queueId, bool connected, @@ -262,4 +262,49 @@ namespace NKikimr { {} }; + struct TEvGetQueuesInfo : public TEventLocal { + NKikimrBlobStorage::EVDiskQueueId QueueId; + + TEvGetQueuesInfo(NKikimrBlobStorage::EVDiskQueueId queueId) + : QueueId(queueId) + {} + }; + + struct TEvQueuesInfo : public TEventLocal { + struct TQueueInfo { + TActorId ActorId; + TIntrusivePtr FlowRecord; + }; + + TEvQueuesInfo(ui32 groupSize) { + Queues.resize(groupSize); + } + + void AddInfoForQueue(ui32 orderNumber, TActorId actorId, const TIntrusivePtr& flowRecord) { + Queues[orderNumber].emplace(TQueueInfo{ + .ActorId = actorId, + .FlowRecord = 
flowRecord + }); + } + + TString ToString() const override { + TStringStream str; + str << "{ TEvQueuesInfo"; + str << " Queues ["; + for (ui32 orderNum = 0; orderNum < Queues.size(); ++orderNum) { + const std::optional& queue = Queues[orderNum]; + if (queue) { + str << " { OrderNumber# " << orderNum + << " ActorId# " << queue->ActorId.ToString() << " },"; + } else { + str << " {}"; + } + } + str << " ] }"; + return str.Str(); + } + + TStackVec, TypicalDisksInGroup> Queues; + }; + } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h b/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h index 6ba8e8bffd99..39a22ead6a94 100644 --- a/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h +++ b/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h @@ -81,8 +81,16 @@ struct TDSProxyEnv { TIntrusivePtr storagePoolCounters = perPoolCounters.GetPoolCounters("pool_name"); TControlWrapper enablePutBatching(DefaultEnablePutBatching, false, true); TControlWrapper enableVPatch(DefaultEnableVPatch, false, true); + TControlWrapper slowDiskThreshold(DefaultSlowDiskThreshold * 1000, 1, 1000000); + TControlWrapper predictedDelayMultiplier(DefaultPredictedDelayMultiplier * 1000, 1, 1000000); IActor *dsproxy = CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(Info), true, nodeMon, - std::move(storagePoolCounters), enablePutBatching, enableVPatch); + std::move(storagePoolCounters), TBlobStorageProxyParameters{ + .EnablePutBatching = enablePutBatching, + .EnableVPatch = enableVPatch, + .SlowDiskThreshold = slowDiskThreshold, + .PredictedDelayMultiplier = predictedDelayMultiplier, + } + ); TActorId actorId = runtime.Register(dsproxy, nodeIndex); runtime.RegisterService(RealProxyActorId, actorId, nodeIndex); diff --git a/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp b/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp index 730ef996b5ff..adf673bff63e 100644 --- a/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp +++ b/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp @@ -71,7 +71,7 @@ void TestIntervalsAndCrcAllOk(TErasureType::EErasureSpecies erasureSpecies, bool TEvBlobStorage::TEvGet ev(queriesA, queryCount, TInstant::Max(), NKikimrBlobStorage::EGetHandleClass::FastRead, false, false); ev.IsVerboseNoDataEnabled = isVerboseNoDataEnabled; - TGetImpl getImpl(group.GetInfo(), groupQueues, &ev, nullptr); + TGetImpl getImpl(group.GetInfo(), groupQueues, &ev, nullptr, TAccelerationParams{}); TDeque> vGets; TLogContext logCtx(NKikimrServices::BS_PROXY_GET, false); logCtx.LogAcc.IsLogEnabled = false; @@ -324,7 +324,7 @@ class TTestWipedAllOkStep { TEvBlobStorage::TEvGet ev(queriesA, queryCount, TInstant::Max(), NKikimrBlobStorage::EGetHandleClass::FastRead, IsRestore, false); ev.IsVerboseNoDataEnabled = IsVerboseNoDataEnabled; - TGetImpl getImpl(Group->GetInfo(), GroupQueues, &ev, nullptr); + TGetImpl getImpl(Group->GetInfo(), GroupQueues, &ev, nullptr, TAccelerationParams{}); ClearCounters(); TDeque> vGets; TDeque> vPuts; @@ -452,7 +452,7 @@ class TGetSimulator { TAutoPtr Simulate(TEvBlobStorage::TEvGet *ev) { TAutoPtr getResult; - TGetImpl getImpl(Group.GetInfo(), GroupQueues, ev, nullptr); + TGetImpl getImpl(Group.GetInfo(), GroupQueues, ev, nullptr, TAccelerationParams{}); TDeque> vGets; TDeque> vPuts; TLogContext logCtx(NKikimrServices::BS_PROXY_GET, false); @@ -563,7 +563,7 @@ Y_UNIT_TEST(TestBlock42VGetCountWithErasure) { TAutoPtr getResult; - TGetImpl getImpl(group.GetInfo(), groupQueues, &ev, nullptr); + TGetImpl getImpl(group.GetInfo(), groupQueues, &ev, nullptr, 
TAccelerationParams{}); TDeque> vGets; TDeque> vPuts; TLogContext logCtx(NKikimrServices::BS_PROXY_GET, false); @@ -703,7 +703,7 @@ Y_UNIT_TEST(TestBlock42WipedOneDiskAndErrorDurringGet) { TAutoPtr getResult; - TGetImpl getImpl(group.GetInfo(), groupQueues, &ev, nullptr); + TGetImpl getImpl(group.GetInfo(), groupQueues, &ev, nullptr, TAccelerationParams{}); TDeque> vGets; TDeque> vPuts; TLogContext logCtx(NKikimrServices::BS_PROXY_GET, false); @@ -974,7 +974,7 @@ void TestWipedErrorWithTwoBlobs(TErasureType::EErasureSpecies erasureSpecies, bo TAutoPtr getResult; - TGetImpl getImpl(group.GetInfo(), groupQueues, &ev, nullptr); + TGetImpl getImpl(group.GetInfo(), groupQueues, &ev, nullptr, TAccelerationParams{}); TDeque> vGets; TDeque> vPuts; TLogContext logCtx(NKikimrServices::BS_PROXY_GET, false); @@ -1192,7 +1192,7 @@ class TTestPossibleBlobLost { TEvBlobStorage::TEvGet ev(queriesA, MaxQueryCount, TInstant::Max(), NKikimrBlobStorage::EGetHandleClass::Discover, true, false); ev.IsVerboseNoDataEnabled = false; - TGetImpl getImpl(Group.GetInfo(), groupQueues, &ev, nullptr); + TGetImpl getImpl(Group.GetInfo(), groupQueues, &ev, nullptr, TAccelerationParams{}); TDeque> vGets; getImpl.GenerateInitialRequests(logCtx, vGets); return vGets.size(); @@ -1257,7 +1257,7 @@ class TTestPossibleBlobLost { TEvBlobStorage::TEvGet ev(queriesA, MaxQueryCount, TInstant::Max(), NKikimrBlobStorage::EGetHandleClass::Discover, true, false); ev.IsVerboseNoDataEnabled = false; - TGetImpl getImpl(Group.GetInfo(), groupQueues, &ev, nullptr); + TGetImpl getImpl(Group.GetInfo(), groupQueues, &ev, nullptr, TAccelerationParams{}); TDeque> vGets; TDeque> vPuts; getImpl.GenerateInitialRequests(logCtx, vGets); @@ -1404,7 +1404,7 @@ class TTestNoDataRegression { TEvBlobStorage::TEvGet ev(queriesA, MaxQueryCount, TInstant::Max(), NKikimrBlobStorage::EGetHandleClass::Discover, true, false); ev.IsVerboseNoDataEnabled = false; - TGetImpl getImpl(Group.GetInfo(), groupQueues, &ev, nullptr); + TGetImpl getImpl(Group.GetInfo(), groupQueues, &ev, nullptr, TAccelerationParams{}); TDeque> vGets; getImpl.GenerateInitialRequests(logCtx, vGets); return vGets.size(); @@ -1469,7 +1469,7 @@ class TTestNoDataRegression { TEvBlobStorage::TEvGet ev(queriesA, MaxQueryCount, TInstant::Max(), NKikimrBlobStorage::EGetHandleClass::Discover, true, false); ev.IsVerboseNoDataEnabled = false; - TGetImpl getImpl(Group.GetInfo(), groupQueues, &ev, nullptr); + TGetImpl getImpl(Group.GetInfo(), groupQueues, &ev, nullptr, TAccelerationParams{}); TDeque> vGets; TDeque> vPuts; getImpl.GenerateInitialRequests(logCtx, vGets); diff --git a/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp b/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp index 2af3baccace5..fc6390a32ca4 100644 --- a/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp +++ b/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp @@ -66,7 +66,7 @@ void TestPutMaxPartCountOnHandoff(TErasureType::EErasureSpecies erasureSpecies) TEvBlobStorage::TEvPut ev(blobId, data, TInstant::Max(), NKikimrBlobStorage::TabletLog, TEvBlobStorage::TEvPut::TacticDefault); - TPutImpl putImpl(group.GetInfo(), groupQueues, &ev, mon, false, TActorId(), 0, NWilson::TTraceId()); + TPutImpl putImpl(group.GetInfo(), groupQueues, &ev, mon, false, TActorId(), 0, NWilson::TTraceId(), TAccelerationParams{}); for (ui32 idx = 0; idx < domainCount; ++idx) { group.SetPredictedDelayNs(idx, 1); @@ -302,10 +302,11 @@ struct TTestPutAllOk { TMaybe putImpl; TPutImpl::TPutResultVec putResults; if constexpr (IsVPut) { - 
putImpl.ConstructInPlace(Group.GetInfo(), GroupQueues, events[0]->Get(), Mon, false, TActorId(), 0, NWilson::TTraceId()); + putImpl.ConstructInPlace(Group.GetInfo(), GroupQueues, events[0]->Get(), Mon, false, TActorId(), 0, NWilson::TTraceId(), + TAccelerationParams{}); } else { putImpl.ConstructInPlace(Group.GetInfo(), GroupQueues, events, Mon, - NKikimrBlobStorage::TabletLog, TEvBlobStorage::TEvPut::TacticDefault, false); + NKikimrBlobStorage::TabletLog, TEvBlobStorage::TEvPut::TacticDefault, false, TAccelerationParams{}); } putImpl->GenerateInitialRequests(LogCtx, PartSets); @@ -352,7 +353,7 @@ Y_UNIT_TEST(TestMirror3dcWith3x3MinLatencyMod) { TString data = AlphaData(size); TEvBlobStorage::TEvPut ev(blobId, data, TInstant::Max(), NKikimrBlobStorage::TabletLog, TEvBlobStorage::TEvPut::TacticMinLatency); - TPutImpl putImpl(env.Info, env.GroupQueues, &ev, env.Mon, true, TActorId(), 0, NWilson::TTraceId()); + TPutImpl putImpl(env.Info, env.GroupQueues, &ev, env.Mon, true, TActorId(), 0, NWilson::TTraceId(), TAccelerationParams{}); TLogContext logCtx(NKikimrServices::BS_PROXY_PUT, false); logCtx.LogAcc.IsLogEnabled = false; diff --git a/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp b/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp index 79b3072da43a..e66e40b95687 100644 --- a/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp +++ b/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp @@ -3420,6 +3420,7 @@ class TTestBlobStorageProxyBatchedPutRequestDoesNotContainAHugeBlob : public TTe .HandleClass = HandleClass, .Tactic = Tactic, .EnableRequestMod3x3ForMinLatency = false, + .AccelerationParams = TAccelerationParams{}, }); ctx.Register(reqActor); @@ -4203,8 +4204,20 @@ class TBlobStorageProxyTest: public TTestBase { TIntrusivePtr dsProxyNodeMon(new TDsProxyNodeMon(counters, true)); TDsProxyPerPoolCounters perPoolCounters(counters); TIntrusivePtr storagePoolCounters = perPoolCounters.GetPoolCounters("pool_name"); + TControlWrapper enablePutBatching(args.EnablePutBatching, false, true); + TControlWrapper enableVPatch(DefaultEnableVPatch, false, true); + TControlWrapper slowDiskThreshold(DefaultSlowDiskThreshold * 1000, 1, 1000000); + TControlWrapper predictedDelayMultiplier(DefaultPredictedDelayMultiplier * 1000, 1, 1000000); std::unique_ptr proxyActor{CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(bsInfo), false, - dsProxyNodeMon, TIntrusivePtr(storagePoolCounters), args.EnablePutBatching, DefaultEnableVPatch)}; + dsProxyNodeMon, TIntrusivePtr(storagePoolCounters), + TBlobStorageProxyParameters{ + .EnablePutBatching = enablePutBatching, + .EnableVPatch = enableVPatch, + .SlowDiskThreshold = slowDiskThreshold, + .PredictedDelayMultiplier = predictedDelayMultiplier, + } + ) + }; TActorSetupCmd bsproxySetup(proxyActor.release(), TMailboxType::Revolving, 3); setup1->LocalServices.push_back(std::pair(env->ProxyId, std::move(bsproxySetup))); diff --git a/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut_runtime.h b/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut_runtime.h index d6220c72972a..1f5bb2805a00 100644 --- a/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut_runtime.h +++ b/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut_runtime.h @@ -87,8 +87,16 @@ class TFaultToleranceTestRuntime { TIntrusivePtr storagePoolCounters = perPoolCounters.GetPoolCounters("pool_name"); TControlWrapper enablePutBatching(DefaultEnablePutBatching, false, true); TControlWrapper enableVPatch(DefaultEnableVPatch, false, true); + TControlWrapper 
slowDiskThreshold(DefaultSlowDiskThreshold * 1000, 1, 1000000); + TControlWrapper predictedDelayMultiplier(DefaultPredictedDelayMultiplier * 1000, 1, 1000000); IActor *dsproxy = CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(GroupInfo), false, nodeMon, - std::move(storagePoolCounters), enablePutBatching, enableVPatch); + std::move(storagePoolCounters), TBlobStorageProxyParameters{ + .EnablePutBatching = enablePutBatching, + .EnableVPatch = enableVPatch, + .SlowDiskThreshold = slowDiskThreshold, + .PredictedDelayMultiplier = predictedDelayMultiplier, + } + ); setup->LocalServices.emplace_back(MakeBlobStorageProxyID(GroupInfo->GroupID), TActorSetupCmd(dsproxy, TMailboxType::Simple, 0)); diff --git a/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp b/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp index 64ace509dd63..8b69da407acc 100644 --- a/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp +++ b/ydb/core/blobstorage/dsproxy/ut_strategy/strategy_ut.cpp @@ -165,7 +165,7 @@ void RunStrategyTest(TBlobStorageGroupType type) { TString state = blackboard[id].ToString(); - auto outcome = blackboard.RunStrategy(logCtx, strategy); + auto outcome = blackboard.RunStrategy(logCtx, strategy, TAccelerationParams{}); TString nextState = blackboard[id].ToString(); if (const auto [it, inserted] = transitions.try_emplace(state, std::make_tuple(outcome, nextState)); !inserted) { @@ -173,7 +173,7 @@ void RunStrategyTest(TBlobStorageGroupType type) { } if (outcome == EStrategyOutcome::IN_PROGRESS) { - auto temp = blackboard.RunStrategy(logCtx, strategy); + auto temp = blackboard.RunStrategy(logCtx, strategy, TAccelerationParams{}); UNIT_ASSERT_EQUAL(temp, outcome); UNIT_ASSERT_VALUES_EQUAL(blackboard[id].ToString(), nextState); } @@ -328,7 +328,7 @@ Y_UNIT_TEST_SUITE(DSProxyStrategyTest) { logCtx.SuppressLog = true; auto runStrategies = [&](TBlackboard& blackboard) { - return blackboard.RunStrategy(logCtx, TMirror3dcGetWithRestoreStrategy()); + return blackboard.RunStrategy(logCtx, TMirror3dcGetWithRestoreStrategy(), TAccelerationParams{}); }; const ui32 base = RandomNumber(512u); diff --git a/ydb/core/blobstorage/nodewarden/node_warden.h b/ydb/core/blobstorage/nodewarden/node_warden.h index 561a8fdc0348..24509a0f0300 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden.h +++ b/ydb/core/blobstorage/nodewarden/node_warden.h @@ -37,6 +37,7 @@ namespace NKikimr { // debugging options bool VDiskReplPausedAtStart = false; + bool UseActorSystemTimeInBSQueue = false; TNodeWardenConfig(const TIntrusivePtr &pDiskServiceFactory) : PDiskServiceFactory(pDiskServiceFactory) diff --git a/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp b/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp index f2995a5b441f..fd1303c94477 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp +++ b/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp @@ -193,6 +193,9 @@ void TNodeWarden::Bootstrap() { "VDiskControls.DiskTimeAvailableScaleSSD"); icb->RegisterSharedControl(CostMetricsParametersByMedia[NPDisk::DEVICE_TYPE_NVME].DiskTimeAvailableScale, "VDiskControls.DiskTimeAvailableScaleNVME"); + + icb->RegisterSharedControl(SlowDiskThreshold, "DSProxyControls.SlowDiskThreshold"); + icb->RegisterSharedControl(PredictedDelayMultiplier, "DSProxyControls.PredictedDelayMultiplier"); } // start replication broker diff --git a/ydb/core/blobstorage/nodewarden/node_warden_impl.h b/ydb/core/blobstorage/nodewarden/node_warden_impl.h index 3a141f3b113c..be7518c4dec0 100644 --- 
a/ydb/core/blobstorage/nodewarden/node_warden_impl.h +++ b/ydb/core/blobstorage/nodewarden/node_warden_impl.h @@ -146,6 +146,9 @@ namespace NKikimr::NStorage { TCostMetricsParametersByMedia CostMetricsParametersByMedia; + TControlWrapper SlowDiskThreshold; + TControlWrapper PredictedDelayMultiplier; + public: struct TGroupRecord; @@ -169,6 +172,8 @@ namespace NKikimr::NStorage { TCostMetricsParameters{50}, TCostMetricsParameters{32}, }) + , SlowDiskThreshold(2000, 1, 1000000) + , PredictedDelayMultiplier(1000, 1, 1000) { Y_ABORT_UNLESS(Cfg->BlobStorageConfig.GetServiceSet().AvailabilityDomainsSize() <= 1); AvailDomainId = 1; diff --git a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp index 599bb5861c84..37e051a42283 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp +++ b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp @@ -35,9 +35,14 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) { case NKikimrBlobStorage::TGroupDecommitStatus::IN_PROGRESS: // create proxy that will be used by blob depot agent to fetch underlying data proxyActorId = as->Register(CreateBlobStorageGroupProxyConfigured( - TIntrusivePtr(info), false, DsProxyNodeMon, - getCounters(info), EnablePutBatching, EnableVPatch), TMailboxType::ReadAsFilled, - AppData()->SystemPoolId); + TIntrusivePtr(info), false, DsProxyNodeMon, getCounters(info), + TBlobStorageProxyParameters{ + .UseActorSystemTimeInBSQueue = Cfg->UseActorSystemTimeInBSQueue, + .EnablePutBatching = EnablePutBatching, + .EnableVPatch = EnableVPatch, + .SlowDiskThreshold = SlowDiskThreshold, + .PredictedDelayMultiplier = PredictedDelayMultiplier, + }), TMailboxType::ReadAsFilled, AppData()->SystemPoolId); [[fallthrough]]; case NKikimrBlobStorage::TGroupDecommitStatus::DONE: proxy.reset(NBlobDepot::CreateBlobDepotAgent(groupId, info, proxyActorId)); @@ -50,12 +55,24 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) { } } else { // create proxy with configuration - proxy.reset(CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(info), false, DsProxyNodeMon, getCounters(info), - EnablePutBatching, EnableVPatch)); + proxy.reset(CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(info), false, + DsProxyNodeMon, getCounters(info), TBlobStorageProxyParameters{ + .EnablePutBatching = EnablePutBatching, + .EnableVPatch = EnableVPatch, + .SlowDiskThreshold = SlowDiskThreshold, + .PredictedDelayMultiplier = PredictedDelayMultiplier, + } + ) + ); } } else { // create proxy without configuration - proxy.reset(CreateBlobStorageGroupProxyUnconfigured(groupId, DsProxyNodeMon, EnablePutBatching, EnableVPatch)); + proxy.reset(CreateBlobStorageGroupProxyUnconfigured(groupId, DsProxyNodeMon, TBlobStorageProxyParameters{ + .EnablePutBatching = EnablePutBatching, + .EnableVPatch = EnableVPatch, + .SlowDiskThreshold = SlowDiskThreshold, + .PredictedDelayMultiplier = PredictedDelayMultiplier, + })); } group.ProxyId = as->Register(proxy.release(), TMailboxType::ReadAsFilled, AppData()->SystemPoolId); diff --git a/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp b/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp index 69ae209bf1e6..4e88bc051459 100644 --- a/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp +++ b/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp @@ -1,4 +1,6 @@ #include +#include +#include #include #include @@ -8,164 +10,537 @@ #define Ctest Cnull Y_UNIT_TEST_SUITE(Acceleration) { + using TFlowRecord = TIntrusivePtr; + using TQueueId = NKikimrBlobStorage::EVDiskQueueId; + + 
struct TDiskDelay { + TWeightedRandom Delays; + TDuration Max; + TString Tag; + + TDiskDelay(TDuration delay = TDuration::Zero(), TString tag = "") + : Max(delay) + , Tag(tag) + { + Delays.AddValue(delay, 1); + } - void SetupEnv(const TBlobStorageGroupType& erasure, std::unique_ptr& env, - ui32& nodeCount, ui32& groupId) { - nodeCount = erasure.BlobSubgroupSize(); + TDiskDelay(TDuration min, ui64 minWeight, TDuration max, ui64 maxWeight, TString tag = "") + : Max(max) + , Tag(tag) + { + Delays.AddValue(min, minWeight); + Delays.AddValue(max, maxWeight); + } - env.reset(new TEnvironmentSetup{{ - .NodeCount = nodeCount, - .Erasure = erasure, - }}); + TDiskDelay(const TDiskDelay&) = default; + TDiskDelay(TDiskDelay&&) = default; + TDiskDelay& operator=(const TDiskDelay&) = default; + TDiskDelay& operator=(TDiskDelay&&) = default; + TDuration GetRandom() { + return Delays.GetRandom(); + } + }; - env->CreateBoxAndPool(1, 1); - env->Sim(TDuration::Minutes(1)); + struct TEvDelayedMessageWrapper : public TEventLocal { + public: + std::unique_ptr Event; - NKikimrBlobStorage::TBaseConfig base = env->FetchBaseConfig(); - UNIT_ASSERT_VALUES_EQUAL(base.GroupSize(), 1); - groupId = base.GetGroup(0).GetGroupId(); + TEvDelayedMessageWrapper(std::unique_ptr& ev) + : Event(ev.release()) + {} + }; - TActorId edge = env->Runtime->AllocateEdgeActor(1); + struct TVDiskDelayEmulator { + TVDiskDelayEmulator(const std::shared_ptr& env) + : Env(env) + {} - env->Runtime->WrapInActorContext(edge, [&] { - SendToBSProxy(edge, groupId, new TEvBlobStorage::TEvStatus(TInstant::Max())); - }); - auto res = env->WaitForEdgeActorEvent(edge, true, TInstant::Max()); - UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); - } + using TFlowKey = std::pair; // { nodeId, queueId } - void TestAcceleratePut(const TBlobStorageGroupType& erasure, ui32 slowDisksNum, - NKikimrBlobStorage::EPutHandleClass handleClass) { - for (ui32 fastDisksNum = 0; fastDisksNum < erasure.BlobSubgroupSize() - 2; ++fastDisksNum) { - std::unique_ptr env; - ui32 nodeCount; - ui32 groupId; - SetupEnv(erasure, env, nodeCount, groupId); + std::shared_ptr Env; + TActorId Edge; + // assuming there is only one disk per node + std::unordered_map FlowRecords; - constexpr TDuration delay = TDuration::Seconds(2); - constexpr TDuration waitFor = TDuration::Seconds(1); + std::unordered_map DelayByNode; + std::deque DelayByResponseOrder; + TDiskDelay DefaultDelay = TDuration::Seconds(1); + bool LogUnwrap = false; - Ctest << "fastDisksNum# " << fastDisksNum << Endl; + using TEventHandler = std::function&)>; - TActorId edge = env->Runtime->AllocateEdgeActor(1); - TString data = "Test"; - TLogoBlobID blobId = TLogoBlobID(1, 1, 1, 1, data.size(), 1); + std::unordered_map EventHandlers; + + void AddHandler(ui32 eventType, TEventHandler handler) { + EventHandlers[eventType] = handler; + } + + bool Filter(ui32/* nodeId*/, std::unique_ptr& ev) { + if (ev->GetTypeRewrite() == TEvDelayedMessageWrapper::EventType) { + std::unique_ptr delayedMsg(std::move(ev)); + ev.reset(delayedMsg->Get()->Event.release()); + if (LogUnwrap) { + Ctest << TAppData::TimeProvider->Now() << " Unwrap " << ev->ToString() << Endl; + } + return true; + } + + ui32 type = ev->GetTypeRewrite(); + auto it = EventHandlers.find(type); + if (it != EventHandlers.end() && it->second) { + return (it->second)(ev); + } + return true; + } + + TDuration GetMsgDelay(ui32 vdiskNodeId) { + TDiskDelay& delay = DefaultDelay; + auto it = DelayByNode.find(vdiskNodeId); + if (it == DelayByNode.end()) { + if 
(!DelayByResponseOrder.empty()) { + delay = DelayByResponseOrder.front(); + DelayByResponseOrder.pop_front(); + } + DelayByNode[vdiskNodeId] = delay; + } else { + delay = it->second; + } + TDuration rand = delay.GetRandom(); + return rand; + } + + TDuration DelayMsg(std::unique_ptr& ev) { + TDuration delay = GetMsgDelay(ev->Sender.NodeId()); + + Env->Runtime->WrapInActorContext(Edge, [&] { + TActivationContext::Schedule(delay, new IEventHandle( + ev->Sender, + ev->Recipient, + new TEvDelayedMessageWrapper(ev)) + ); + }); + return delay; + } + + void SetDelayByResponseOrder(const std::deque& delays) { + DelayByResponseOrder = delays; + DelayByNode = {}; + } + }; + + struct TDelayer { + std::shared_ptr VDiskDelayEmulator; + + bool operator()(ui32 nodeId, std::unique_ptr& ev) { + return VDiskDelayEmulator->Filter(nodeId, ev); + } + }; + + struct TestCtx { + TestCtx(const TBlobStorageGroupType& erasure, float slowDiskThreshold, float delayMultiplier) + : NodeCount(erasure.BlobSubgroupSize() + 1) + , Erasure(erasure) + , Env(new TEnvironmentSetup({ + .NodeCount = NodeCount, + .Erasure = erasure, + .LocationGenerator = [this](ui32 nodeId) { return LocationGenerator(nodeId); }, + .SlowDiskThreshold = slowDiskThreshold, + .VDiskPredictedDelayMultiplier = delayMultiplier, + })) + , VDiskDelayEmulator(new TVDiskDelayEmulator(Env)) + {} + + TNodeLocation LocationGenerator(ui32 nodeId) { + if (Erasure.BlobSubgroupSize() == 9) { + if (nodeId == NodeCount) { + return TNodeLocation{"4", "1", "1", "1"}; + } + return TNodeLocation{ + std::to_string((nodeId - 1) / 3), + "1", + std::to_string((nodeId - 1) % 3), + "0" + }; + } else { + if (nodeId == NodeCount) { + return TNodeLocation{"2", "1", "1", "1"}; + } + return TNodeLocation{"1", "1", std::to_string(nodeId), "0"}; + } + } + + void Initialize() { + Env->CreateBoxAndPool(1, 1); + Env->Sim(TDuration::Minutes(1)); + + NKikimrBlobStorage::TBaseConfig base = Env->FetchBaseConfig(); + UNIT_ASSERT_VALUES_EQUAL(base.GroupSize(), 1); + const auto& group = base.GetGroup(0); + GroupId = group.GetGroupId(); - env->Runtime->WrapInActorContext(edge, [&] { - SendToBSProxy(edge, groupId, new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max()), handleClass); + Edge = Env->Runtime->AllocateEdgeActor(NodeCount); + VDiskDelayEmulator->Edge = Edge; + + std::unordered_map OrderNumberToNodeId; + + for (ui32 orderNum = 0; orderNum < group.VSlotIdSize(); ++orderNum) { + OrderNumberToNodeId[orderNum] = group.GetVSlotId(orderNum).GetNodeId(); + } + + Env->Runtime->WrapInActorContext(Edge, [&] { + SendToBSProxy(Edge, GroupId, new TEvBlobStorage::TEvStatus(TInstant::Max())); }); + auto res = Env->WaitForEdgeActorEvent(Edge, false, TInstant::Max()); + + Env->Runtime->FilterFunction = TDelayer{ .VDiskDelayEmulator = VDiskDelayEmulator }; + + for (const TQueueId& queueId : { TQueueId::PutTabletLog, TQueueId::GetFastRead, TQueueId::PutAsyncBlob, + TQueueId::GetAsyncRead }) { + Ctest << "Send TEvGetQueuesInfo " << queueId << Endl; + Env->Runtime->WrapInActorContext(Edge, [&] { + SendToBSProxy(Edge, GroupId, new TEvGetQueuesInfo(queueId)); + }); + auto res = Env->WaitForEdgeActorEvent(Edge, false, TInstant::Max()); + Ctest << "Get TEvQueuesInfo " << res->Get()->ToString() << Endl; + + for (ui32 orderNum = 0; orderNum < res->Get()->Queues.size(); ++orderNum) { + const std::optional& queue = res->Get()->Queues[orderNum]; + if (queue) { + Y_ABORT_UNLESS(queue->FlowRecord); + queue->FlowRecord->SetPredictedDelayNs(VDiskDelayEmulator->DefaultDelay.Max.NanoSeconds()); + 
VDiskDelayEmulator->FlowRecords[{ OrderNumberToNodeId[orderNum], queueId }] = queue->FlowRecord; + } + } + } + } + + ~TestCtx() { + Env->Runtime->FilterFunction = {}; + } - THashSet fastDisks; - THashSet slowDisks; + ui32 NodeCount; + TBlobStorageGroupType Erasure; + std::shared_ptr Env; + + ui32 GroupId; + TActorId Edge; + std::shared_ptr VDiskDelayEmulator; + }; + + #define ADD_DSPROXY_MESSAGE_PRINTER(MsgType) \ + ctx.VDiskDelayEmulator->AddHandler(MsgType::EventType, [&](std::unique_ptr& ev) { \ + if (ev->Recipient.NodeId() == ctx.NodeCount) { \ + Ctest << TAppData::TimeProvider->Now() << " Send "#MsgType": " << ev->Sender.ToString() << " " << \ + ev->Recipient.ToString() << ev->Get()->ToString() << Endl; \ + } \ + return true; \ + } \ + ) + + void TestAcceleratePut(const TBlobStorageGroupType& erasure, ui32 slowDisksNum, + NKikimrBlobStorage::EPutHandleClass handleClass, TDuration fastDelay, + TDuration slowDelay, TDuration initDelay, TDuration waitTime, + float delayMultiplier) { + ui32 initialRequests = 100; + float slowDiskThreshold = 2; + TDiskDelay fastDiskDelay = TDiskDelay(fastDelay); + TDiskDelay slowDiskDelay = TDiskDelay(slowDelay); + TDiskDelay initDiskDelay = TDiskDelay(initDelay); + + for (ui32 fastDisksNum = 0; fastDisksNum < erasure.BlobSubgroupSize() - 2; ++fastDisksNum) { + Ctest << "fastDisksNum# " << fastDisksNum << Endl; + TestCtx ctx(erasure, slowDiskThreshold, delayMultiplier); + ctx.VDiskDelayEmulator->DefaultDelay = initDiskDelay; + ctx.Initialize(); + + TString data = MakeData(1024); + auto put = [&](TLogoBlobID blobId) { + ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { + SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max()), handleClass); + }); + auto res = ctx.Env->WaitForEdgeActorEvent( + ctx.Edge, false, TAppData::TimeProvider->Now() + waitTime); + UNIT_ASSERT_C(res, "fastDisksNum# " << fastDisksNum); + UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); + }; - env->Runtime->FilterFunction = [&](ui32/* nodeId*/, std::unique_ptr& ev) { - if (ev->GetTypeRewrite() == TEvBlobStorage::TEvVPutResult::EventType) { + bool verboseHandlers = false; + ctx.VDiskDelayEmulator->AddHandler(TEvBlobStorage::TEvVPutResult::EventType, [&](std::unique_ptr& ev) { + ui32 nodeId = ev->Sender.NodeId(); + if (nodeId < ctx.NodeCount) { TVDiskID vdiskId = VDiskIDFromVDiskID(ev->Get()->Record.GetVDiskID()); TLogoBlobID partId = LogoBlobIDFromLogoBlobID(ev->Get()->Record.GetBlobID()); - Ctest << TAppData::TimeProvider->Now() << " TEvVPutResult: vdiskId# " << vdiskId.ToString() << - " partId# " << partId.ToString() << ", "; - if (fastDisks.size() < fastDisksNum || fastDisks.count(vdiskId)) { - fastDisks.insert(vdiskId); - Ctest << "pass message" << Endl; - return true; - } else if (!slowDisks.count(vdiskId) && slowDisks.size() >= slowDisksNum) { - Ctest << "pass message" << Endl; - return true; - } else { - Ctest << "delay message for " << delay.ToString() << Endl; - slowDisks.insert(vdiskId); - env->Runtime->WrapInActorContext(edge, [&] { - TActivationContext::Schedule(delay, ev.release()); - }); - - return false; + TDuration delay = ctx.VDiskDelayEmulator->DelayMsg(ev); + if (verboseHandlers) { + Ctest << TAppData::TimeProvider->Now() << " TEvVPutResult: vdiskId# " << vdiskId.ToString() << + " partId# " << partId.ToString() << " nodeId# " << nodeId << ", delay " << delay << Endl; } + return false; } return true; - }; + }); + + for (ui32 i = 0; i < initialRequests; ++i) { + put(TLogoBlobID(1, 1, 1, 1, data.size(), 123 + i)); + 
} + + ctx.Env->Sim(slowDelay); + + std::deque delayByResponseOrder; + for (ui32 i = 0; i < erasure.BlobSubgroupSize(); ++i) { + if (i >= fastDisksNum && i < fastDisksNum + slowDisksNum) { + delayByResponseOrder.push_back(slowDiskDelay); + } else { + delayByResponseOrder.push_back(fastDiskDelay); + } + } + ctx.VDiskDelayEmulator->SetDelayByResponseOrder(delayByResponseOrder); + + ctx.VDiskDelayEmulator->LogUnwrap = true; + verboseHandlers = true; + ADD_DSPROXY_MESSAGE_PRINTER(TEvBlobStorage::TEvVPut); + put(TLogoBlobID(1, 1, 1, 1, data.size(), 1)); - auto res = env->WaitForEdgeActorEvent(edge, false, TAppData::TimeProvider->Now() + waitFor); - UNIT_ASSERT_C(res, "fastDisksNum# " << fastDisksNum); - UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); } } void TestAccelerateGet(const TBlobStorageGroupType& erasure, ui32 slowDisksNum, - NKikimrBlobStorage::EGetHandleClass handleClass) { - for (ui32 fastDisksNum = 0; fastDisksNum < erasure.BlobSubgroupSize() - 2; ++fastDisksNum) { - std::unique_ptr env; - ui32 nodeCount; - ui32 groupId; - SetupEnv(erasure, env, nodeCount, groupId); - - constexpr TDuration delay = TDuration::Seconds(2); - constexpr TDuration waitFor = TDuration::Seconds(1); + NKikimrBlobStorage::EGetHandleClass handleClass, TDuration fastDelay, + TDuration slowDelay, TDuration initDelay, TDuration waitTime, + float delayMultiplier) { + ui32 initialRequests = 100; + float slowDiskThreshold = 2; + TDiskDelay fastDiskDelay = TDiskDelay(fastDelay); + TDiskDelay slowDiskDelay = TDiskDelay(slowDelay); + TDiskDelay initDiskDelay = TDiskDelay(initDelay); + for (ui32 fastDisksNum = 0; fastDisksNum < erasure.BlobSubgroupSize() - 2; ++fastDisksNum) { Ctest << "fastDisksNum# " << fastDisksNum << Endl; + TestCtx ctx(erasure, slowDiskThreshold, delayMultiplier); + ctx.VDiskDelayEmulator->DefaultDelay = initDiskDelay; + ctx.Initialize(); + + bool verboseHandlers = false; + ctx.VDiskDelayEmulator->AddHandler(TEvBlobStorage::TEvVGetResult::EventType, [&](std::unique_ptr& ev) { + ui32 nodeId = ev->Sender.NodeId(); + if (nodeId < ctx.NodeCount) { + TVDiskID vdiskId = VDiskIDFromVDiskID(ev->Get()->Record.GetVDiskID()); + TLogoBlobID partId = LogoBlobIDFromLogoBlobID( + ev->Get()->Record.GetResult(0).GetBlobID()); + TDuration delay = ctx.VDiskDelayEmulator->DelayMsg(ev); + if (verboseHandlers) { + Ctest << TAppData::TimeProvider->Now() << " TEvVGetResult: vdiskId# " << vdiskId.ToString() << + " partId# " << partId.ToString() << " nodeId# " << nodeId << ", delay " << delay << Endl; + } + return false; + } + return true; + }); - TActorId edge = env->Runtime->AllocateEdgeActor(1); TString data = MakeData(1024); - TLogoBlobID blobId = TLogoBlobID(1, 1, 1, 1, data.size(), 1); + auto putAndGet = [&](TLogoBlobID blobId) { + ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { + SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max())); + }); + auto putRes = ctx.Env->WaitForEdgeActorEvent(ctx.Edge, false, TInstant::Max()); + UNIT_ASSERT_VALUES_EQUAL(putRes->Get()->Status, NKikimrProto::OK); + + ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { + SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvGet(blobId, 0, data.size(), TInstant::Max(), handleClass)); + }); + auto getRes = ctx.Env->WaitForEdgeActorEvent(ctx.Edge, false, TAppData::TimeProvider->Now() + waitTime); + UNIT_ASSERT_C(getRes, "fastDisksNum# " << fastDisksNum); + UNIT_ASSERT_VALUES_EQUAL(getRes->Get()->Status, NKikimrProto::OK); + 
UNIT_ASSERT_VALUES_EQUAL(getRes->Get()->Responses[0].Status, NKikimrProto::OK); + }; + + for (ui32 i = 0; i < initialRequests; ++i) { + putAndGet(TLogoBlobID(1, 1, 1, 1, data.size(), 123 + i)); + } + ctx.Env->Sim(slowDelay); + + std::deque delayByResponseOrder; + for (ui32 i = 0; i < erasure.BlobSubgroupSize(); ++i) { + if (i >= fastDisksNum && i < fastDisksNum + slowDisksNum) { + delayByResponseOrder.push_back(slowDiskDelay); + } else { + delayByResponseOrder.push_back(fastDiskDelay); + } + } + ctx.VDiskDelayEmulator->SetDelayByResponseOrder(delayByResponseOrder); + + ctx.VDiskDelayEmulator->LogUnwrap = true; + verboseHandlers = true; + ADD_DSPROXY_MESSAGE_PRINTER(TEvBlobStorage::TEvVGet); + putAndGet(TLogoBlobID(1, 1, 1, 1, data.size(), 2)); + } + } + + using TTestThresholdRequestSender = std::function; + + void TestThresholdSendPutRequests(TestCtx& ctx, ui32 requests) { + ui64 cookie = 1; + + for (ui32 i = 0; i < requests; ++i) { + TString data = "Test"; + TLogoBlobID blobId = TLogoBlobID(1, 1, 1, 1, data.size(), ++cookie); - env->Runtime->WrapInActorContext(edge, [&] { - SendToBSProxy(edge, groupId, new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max())); + Ctest << " ------------------- Send TEvPut# " << i << " ------------------- " << Endl; + ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { + SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max())); + }); + auto res = ctx.Env->WaitForEdgeActorEvent(ctx.Edge, false, TInstant::Max()); + UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); + } + } + + void TestThresholdSendGetRequests(TestCtx& ctx, ui32 requests) { + ui64 cookie = 1; + std::vector blobs; + TString data = MakeData(1024); + + for (ui32 i = 0; i < requests; ++i) { + TLogoBlobID blobId = TLogoBlobID(1, 1, 1, 1, data.size(), ++cookie); + ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { + SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max())); }); - - env->WaitForEdgeActorEvent(edge, false, TInstant::Max()); - env->Runtime->WrapInActorContext(edge, [&] { - SendToBSProxy(edge, groupId, new TEvBlobStorage::TEvGet(blobId, 0, data.size(), TInstant::Max(), handleClass)); + blobs.push_back(blobId); + auto res = ctx.Env->WaitForEdgeActorEvent(ctx.Edge, false, TInstant::Max()); + UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); + } + + for (const auto& blobId : blobs) { + ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { + SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvGet(blobId, 0, data.size(), TInstant::Max(), + NKikimrBlobStorage::AsyncRead)); }); + auto res = ctx.Env->WaitForEdgeActorEvent(ctx.Edge, false, TInstant::Max()); + UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); + UNIT_ASSERT_VALUES_EQUAL(res->Get()->Responses[0].Status, NKikimrProto::OK); + } + } - THashSet slowDisks; - THashSet fastDisks; + void TestThreshold(const TBlobStorageGroupType& erasure, ui32 slowDisks, bool delayPuts, bool delayGets, + TTestThresholdRequestSender sendRequests) { + float delayMultiplier = 1; + float slowDiskThreshold = 1.2; + TDiskDelay fastDiskDelay = TDiskDelay(TDuration::Seconds(0.1), 10, TDuration::Seconds(1), 1, "fast"); + TDiskDelay slowDiskDelay = TDiskDelay(TDuration::Seconds(1.5), "slow"); + + ui32 requests = 1000; + + TestCtx ctx(erasure, slowDiskThreshold, delayMultiplier); + ctx.VDiskDelayEmulator->DefaultDelay = fastDiskDelay; + ui32 groupSize = erasure.BlobSubgroupSize(); + + std::vector nodeIsSlow(groupSize, true); + std::vector 
vputsByNode(groupSize, 0); + + for (ui32 i = 0; i < groupSize; ++i) { + bool isSlow = (i % 3 == 0 && i / 3 < slowDisks); + ctx.VDiskDelayEmulator->DelayByNode[i + 1] = isSlow ? slowDiskDelay : fastDiskDelay; + nodeIsSlow[i] = isSlow; + } - env->Runtime->FilterFunction = [&](ui32/* nodeId*/, std::unique_ptr& ev) { - if (ev->GetTypeRewrite() == TEvBlobStorage::TEvVGetResult::EventType) { + ctx.Initialize(); + ctx.VDiskDelayEmulator->LogUnwrap = true; + + if (delayPuts) { + ctx.VDiskDelayEmulator->AddHandler(TEvBlobStorage::TEvVPutResult::EventType, [&](std::unique_ptr& ev) { + ui32 nodeId = ev->Sender.NodeId(); + if (nodeId < ctx.NodeCount) { + TVDiskID vdiskId = VDiskIDFromVDiskID(ev->Get()->Record.GetVDiskID()); + TLogoBlobID partId = LogoBlobIDFromLogoBlobID(ev->Get()->Record.GetBlobID()); + TDuration delay = ctx.VDiskDelayEmulator->DelayMsg(ev); + Ctest << TAppData::TimeProvider->Now() << " TEvVPutResult: vdiskId# " << vdiskId.ToString() << + " partId# " << partId.ToString() << " nodeId# " << nodeId << ", delay " << delay << Endl; + ++vputsByNode[nodeId - 1]; + return false; + } + return true; + }); + ADD_DSPROXY_MESSAGE_PRINTER(TEvBlobStorage::TEvVPut); + } + + if (delayGets) { + ctx.VDiskDelayEmulator->AddHandler(TEvBlobStorage::TEvVGetResult::EventType, [&](std::unique_ptr& ev) { + ui32 nodeId = ev->Sender.NodeId(); + if (nodeId < ctx.NodeCount) { TVDiskID vdiskId = VDiskIDFromVDiskID(ev->Get()->Record.GetVDiskID()); TLogoBlobID partId = LogoBlobIDFromLogoBlobID( ev->Get()->Record.GetResult(0).GetBlobID()); - Ctest << TAppData::TimeProvider->Now() << " TEvVGetResult: " << vdiskId.ToString() << - " partId# " << partId.ToString() << ", "; - if (fastDisks.size() < fastDisksNum || fastDisks.count(vdiskId)) { - fastDisks.insert(vdiskId); - Ctest << "pass message" << Endl; - return true; - } else if (!slowDisks.count(vdiskId) && slowDisks.size() >= slowDisksNum) { - Ctest << "pass message" << Endl; - return true; - } else { - Ctest << "delay message for " << delay.ToString() << Endl; - slowDisks.insert(vdiskId); - env->Runtime->WrapInActorContext(edge, [&] { - TActivationContext::Schedule(delay, ev.release()); - }); - - return false; - } + TDuration delay = ctx.VDiskDelayEmulator->DelayMsg(ev); + Ctest << TAppData::TimeProvider->Now() << " TEvVGetResult: vdiskId# " << vdiskId.ToString() << + " partId# " << partId.ToString() << " nodeId# " << nodeId << ", delay " << delay << Endl; + ++vputsByNode[nodeId - 1]; + return false; } return true; - }; + }); + ADD_DSPROXY_MESSAGE_PRINTER(TEvBlobStorage::TEvVGet); + } - auto res = env->WaitForEdgeActorEvent(edge, false, TAppData::TimeProvider->Now() + waitFor); - UNIT_ASSERT_C(res, "fastDisksNum# " << fastDisksNum); - UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); - UNIT_ASSERT_VALUES_EQUAL(res->Get()->Responses[0].Status, NKikimrProto::OK); - Ctest << "TEvGetResult# " << res->Get()->ToString() << Endl; + sendRequests(ctx, requests); + + ui32 slowNodesCount = 0; + ui32 slowNodesRequests = 0; + ui32 fastNodesCount = 0; + ui32 fastNodesRequests = 0; + + TStringStream str; + + str << "VPUTS BY NODE: "; + for (ui32 i = 0; i < groupSize; ++i) { + str << "{ nodeId# " << i << " isSlow# " << nodeIsSlow[i] << ' ' << vputsByNode[i] << "}, "; + if (nodeIsSlow[i]) { + ++slowNodesCount; + slowNodesRequests += vputsByNode[i]; + } else { + ++fastNodesCount; + fastNodesRequests += vputsByNode[i]; + } } + Ctest << str.Str() << Endl; + + double slowNodeRequestsAvg = 1. * slowNodesRequests / slowNodesCount; + double fastNodeRequestsAvg = 1. 
* fastNodesRequests / fastNodesCount; + + UNIT_ASSERT_LE_C(slowNodeRequestsAvg, fastNodeRequestsAvg / 3, str.Str()); + } + + void TestThresholdPut(const TBlobStorageGroupType& erasure, ui32 slowDisks) { + TestThreshold(erasure, slowDisks, true, false, TestThresholdSendPutRequests); + } + + void TestThresholdGet(const TBlobStorageGroupType& erasure, ui32 slowDisks) { + TestThreshold(erasure, slowDisks, false, true, TestThresholdSendGetRequests); + } + + void TestDelayMultiplierPut(const TBlobStorageGroupType& erasure, ui32 slowDisks) { + TestAcceleratePut(erasure, slowDisks, NKikimrBlobStorage::AsyncBlob, TDuration::Seconds(0.9), + TDuration::Seconds(2), TDuration::Seconds(1), TDuration::Seconds(1.95), 0.8); + } + + void TestDelayMultiplierGet(const TBlobStorageGroupType& erasure, ui32 slowDisks) { + TestAccelerateGet(erasure, slowDisks, NKikimrBlobStorage::AsyncRead, TDuration::Seconds(0.9), + TDuration::Seconds(2 + ), TDuration::Seconds(1), TDuration::Seconds(1.95), 0.8); } #define TEST_ACCELERATE(erasure, method, handleClass, slowDisks) \ - Y_UNIT_TEST(Test##erasure##method##handleClass##slowDisks##Slow) { \ - TestAccelerate##method(TBlobStorageGroupType::Erasure##erasure, slowDisks, NKikimrBlobStorage::handleClass); \ + Y_UNIT_TEST(TestAcceleration##erasure##method##handleClass##slowDisks##Slow) { \ + TestAccelerate##method(TBlobStorageGroupType::Erasure##erasure, slowDisks, NKikimrBlobStorage::handleClass, \ + TDuration::Seconds(1), TDuration::Seconds(5), TDuration::Seconds(1), TDuration::Seconds(4), 1); \ } TEST_ACCELERATE(Mirror3dc, Put, AsyncBlob, 1); // TEST_ACCELERATE(Mirror3of4, Put, AsyncBlob, 1); TEST_ACCELERATE(4Plus2Block, Put, AsyncBlob, 1); -// TEST_ACCELERATE(Mirror3dc, Put, AsyncBlob, 2); + TEST_ACCELERATE(Mirror3dc, Put, AsyncBlob, 2); // TEST_ACCELERATE(Mirror3of4, Put, AsyncBlob, 2); TEST_ACCELERATE(4Plus2Block, Put, AsyncBlob, 2); @@ -173,9 +548,42 @@ Y_UNIT_TEST_SUITE(Acceleration) { // TEST_ACCELERATE(Mirror3of4, Get, AsyncRead, 1); TEST_ACCELERATE(4Plus2Block, Get, AsyncRead, 1); -// TEST_ACCELERATE(Mirror3dc, Get, AsyncRead, 2); + TEST_ACCELERATE(Mirror3dc, Get, AsyncRead, 2); // TEST_ACCELERATE(Mirror3of4, Get, AsyncRead, 2); TEST_ACCELERATE(4Plus2Block, Get, AsyncRead, 2); + #define TEST_ACCELERATE_PARAMS(param, method, erasure, slowDisks) \ + Y_UNIT_TEST(Test##param##method##erasure##slowDisks##Slow) { \ + Test##param##method(TBlobStorageGroupType::Erasure##erasure, slowDisks); \ + } + +// TEST_ACCELERATE_PARAMS(Threshold, Put, Mirror3dc, 1); + TEST_ACCELERATE_PARAMS(Threshold, Put, 4Plus2Block, 1); + +// TEST_ACCELERATE_PARAMS(Threshold, Put, Mirror3dc, 2); +// TEST_ACCELERATE_PARAMS(Threshold, Put, 4Plus2Block, 2); + +// TEST_ACCELERATE_PARAMS(Threshold, Get, Mirror3dc, 1); + TEST_ACCELERATE_PARAMS(Threshold, Get, 4Plus2Block, 1); + +// TEST_ACCELERATE_PARAMS(Threshold, Get, Mirror3dc, 2); +// TEST_ACCELERATE_PARAMS(Threshold, Get, 4Plus2Block, 2); + + // TODO(serg-belyakov): fix all muted tests + + TEST_ACCELERATE_PARAMS(DelayMultiplier, Put, Mirror3dc, 1); + TEST_ACCELERATE_PARAMS(DelayMultiplier, Put, 4Plus2Block, 1); + + TEST_ACCELERATE_PARAMS(DelayMultiplier, Put, Mirror3dc, 2); + TEST_ACCELERATE_PARAMS(DelayMultiplier, Put, 4Plus2Block, 2); + + TEST_ACCELERATE_PARAMS(DelayMultiplier, Get, Mirror3dc, 1); + TEST_ACCELERATE_PARAMS(DelayMultiplier, Get, 4Plus2Block, 1); + + TEST_ACCELERATE_PARAMS(DelayMultiplier, Get, Mirror3dc, 2); + TEST_ACCELERATE_PARAMS(DelayMultiplier, Get, 4Plus2Block, 2); + #undef TEST_ACCELERATE + #undef TEST_ACCELERATE_PARAMS + 
#undef PRINT_DSPROXY_MESSAGE } diff --git a/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_env.h b/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_env.h index f7b66f589a75..bdc6b0e0a534 100644 --- a/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_env.h +++ b/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_env.h @@ -75,29 +75,6 @@ struct TTabletInfo { using TBSState = std::map; -struct TIntervals { - std::vector Borders; // [0; x_1) [x_1; x_2) ... [x_n-1; x_n) - - TIntervals(std::vector borders) { - Borders = borders; - for (ui32 i = 1; i < Borders.size(); ++i) { - Borders[i] += Borders[i - 1]; - } - } - - ui32 GetInterval(ui32 x) { - for (ui32 i = 0; i < Borders.size(); ++i) { - if (x < Borders[i]) { - return i; - } - } - return Borders.size(); - } - ui32 UpperLimit() { - return Borders[Borders.size() - 1]; - } -}; - struct TEvArgs { enum EEventType : ui32 { PUT, @@ -181,6 +158,7 @@ struct TEvRangeArgs : public TEvArgs { }; struct TBlobDepotTestEnvironment { + ui64 RandomSeed; TMersenne Mt; TMersenne Mt64; @@ -191,7 +169,8 @@ struct TBlobDepotTestEnvironment { TBlobDepotTestEnvironment(ui32 seed = 0, ui32 numGroups = 1, ui32 nodeCount = 8, TBlobStorageGroupType erasure = TBlobStorageGroupType::ErasureMirror3of4) - : Mt(seed) + : RandomSeed(seed) + , Mt(seed) , Mt64(seed) { Cerr << "Mersenne random seed " << seed << Endl; ConfigureEnvironment(numGroups, Env, RegularGroups, BlobDepot, nodeCount, erasure); diff --git a/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp b/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp index 1cdbeb9b7cd3..ec56f7a91d6a 100644 --- a/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp +++ b/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -460,14 +461,18 @@ void TestVerifiedRandom(TBlobDepotTestEnvironment& tenv, ui32 nodeCount, ui64 ta COLLECT_GARBAGE_HARD, COLLECT_GARBAGE_SOFT, RESTART_BLOB_DEPOT, + __COUNT__, }; std::vector actionName = { "ALTER", "PUT", "GET", "MULTIGET", "RANGE", "BLOCK", "DISCOVER", "COLLECT_GARBAGE_HARD", "COLLECT_GARBAGE_SOFT", "RESTART_BLOB_DEPOT" }; - std::vector probs = probabilities; - TIntervals act(probs); + TWeightedRandom act(tenv.RandomSeed + 0xABCD); + Y_ABORT_UNLESS(probabilities.size() == EActions::__COUNT__); + for (ui32 i = 0; i < probabilities.size(); ++i) { + act.AddValue(i, probabilities[i]); + } std::vector tablets = {tabletId0, tabletId0 + 1, tabletId0 + 2}; std::vector tabletGen = {1, 1, 1}; @@ -508,7 +513,7 @@ void TestVerifiedRandom(TBlobDepotTestEnvironment& tenv, ui32 nodeCount, ui64 ta ui32 hardCollectGen = state[tabletId].Channels[channel].HardCollectGen; ui32 hardCollectStep = state[tabletId].Channels[channel].HardCollectStep; - ui32 action = act.GetInterval(tenv.Rand(act.UpperLimit())); + ui32 action = act.GetRandom(); // Cerr << "iteration# " << iteration << " action# " << actionName[action] << " timer# " << timer.Passed() << Endl; switch (action) { case EActions::ALTER: @@ -689,13 +694,13 @@ void TestVerifiedRandom(TBlobDepotTestEnvironment& tenv, ui32 nodeCount, ui64 ta break; default: - UNIT_FAIL("TIntervals failed"); + UNIT_FAIL("Unknown action# " << action); } } } void TestLoadPutAndGet(TBlobDepotTestEnvironment& tenv, ui64 tabletId, ui32 groupId, ui32 blobsNum, ui32 maxBlobSize, - ui32 readsNum, bool decommit, ui32 timeLimitSec, std::vector probablities) { + ui32 readsNum, bool decommit, ui32 timeLimitSec, std::vector probabilities) { enum EActions { GET, MULTIGET, @@ 
-703,9 +708,14 @@ void TestLoadPutAndGet(TBlobDepotTestEnvironment& tenv, ui64 tabletId, ui32 grou DISCOVER, CATCH_ALL, RESTART_BLOB_DEPOT, + __COUNT__, }; - std::vector probs = probablities; - TIntervals act(probs); + + TWeightedRandom act(tenv.RandomSeed + 0xABCD); + Y_ABORT_UNLESS(probabilities.size() == EActions::__COUNT__); + for (ui32 i = 0; i < probabilities.size(); ++i) { + act.AddValue(i, probabilities[i]); + } std::vector blobs; std::map mappedBlobs; @@ -748,7 +758,7 @@ void TestLoadPutAndGet(TBlobDepotTestEnvironment& tenv, ui64 tabletId, ui32 grou THPTimer timer; for (ui32 iteration = 0; iteration < readsNum; ++iteration) { - ui32 action = act.GetInterval(tenv.Rand(act.UpperLimit())); + ui32 action = act.GetRandom(); if (iteration == readsNum - 1) { // Catch all results on the last iteration action = EActions::CATCH_ALL; } @@ -875,7 +885,7 @@ void TestLoadPutAndGet(TBlobDepotTestEnvironment& tenv, ui64 tabletId, ui32 grou break; default: - UNIT_FAIL("TIntervals failed"); + UNIT_FAIL("Unknown action# " << action); } } } diff --git a/ydb/core/blobstorage/ut_blobstorage/lib/common.h b/ydb/core/blobstorage/ut_blobstorage/lib/common.h index e581eeb206f8..462d6f67861e 100644 --- a/ydb/core/blobstorage/ut_blobstorage/lib/common.h +++ b/ydb/core/blobstorage/ut_blobstorage/lib/common.h @@ -2,6 +2,7 @@ #include "env.h" #include +#include inline TBlobStorageGroupType GetErasureTypeByString(const TString& erasure) { @@ -55,3 +56,44 @@ inline std::vector MakePDiskLayout(const NKikimrBlobStorage::TBaseConfig& } return pdiskLayout; } + +template +class TWeightedRandom { +public: + TWeightedRandom(ui64 seed = 0) + : PrefixSum({ 0 }) + , Mt64(seed) + {} + + TWeightedRandom(const TWeightedRandom&) = default; + TWeightedRandom(TWeightedRandom&&) = default; + TWeightedRandom& operator=(const TWeightedRandom&) = default; + TWeightedRandom& operator=(TWeightedRandom&&) = default; + + void AddValue(T value, ui64 weight) { + PrefixSum.push_back(weight + PrefixSum.back()); + Values.push_back(value); + } + + T GetRandom() { + Y_ABORT_UNLESS(WeightSum() != 0); + return Get(Mt64() % WeightSum()); + } + + T Get(ui64 w) { + Y_ABORT_UNLESS(PrefixSum.size() > 1); + auto it = std::upper_bound(PrefixSum.begin(), PrefixSum.end(), w); + Y_ABORT_UNLESS(it > PrefixSum.begin()); + ui32 idx = it - PrefixSum.begin() - 1; + return Values[idx]; + } + + ui32 WeightSum() { + return PrefixSum.back(); + } + +private: + std::vector Values; + std::vector PrefixSum; + TMersenne Mt64; +}; diff --git a/ydb/core/blobstorage/ut_blobstorage/lib/env.h b/ydb/core/blobstorage/ut_blobstorage/lib/env.h index f079b571104e..c3923969aa39 100644 --- a/ydb/core/blobstorage/ut_blobstorage/lib/env.h +++ b/ydb/core/blobstorage/ut_blobstorage/lib/env.h @@ -48,6 +48,8 @@ struct TEnvironmentSetup { const ui32 MinHugeBlobInBytes = 0; const float DiskTimeAvailableScale = 1; const bool UseFakeConfigDispatcher = false; + const float SlowDiskThreshold = 2; + const float VDiskPredictedDelayMultiplier = 1; }; const TSettings Settings; @@ -363,6 +365,7 @@ struct TEnvironmentSetup { auto config = MakeIntrusive(new TMockPDiskServiceFactory(*this)); config->BlobStorageConfig.MutableServiceSet()->AddAvailabilityDomains(DomainId); config->VDiskReplPausedAtStart = Settings.VDiskReplPausedAtStart; + config->UseActorSystemTimeInBSQueue = true; if (Settings.ConfigPreprocessor) { Settings.ConfigPreprocessor(nodeId, *config); } @@ -396,6 +399,9 @@ struct TEnvironmentSetup { ADD_ICB_CONTROL("VDiskControls.DiskTimeAvailableScaleHDD", 1'000, 1, 1'000'000, 
std::round(Settings.DiskTimeAvailableScale * 1'000)); ADD_ICB_CONTROL("VDiskControls.DiskTimeAvailableScaleSSD", 1'000, 1, 1'000'000, std::round(Settings.DiskTimeAvailableScale * 1'000)); ADD_ICB_CONTROL("VDiskControls.DiskTimeAvailableScaleNVME", 1'000, 1, 1'000'000, std::round(Settings.DiskTimeAvailableScale * 1'000)); + + ADD_ICB_CONTROL("DSProxyControls.SlowDiskThreshold", 2'000, 1, 1'000'000, std::round(Settings.SlowDiskThreshold * 1'000)); + ADD_ICB_CONTROL("DSProxyControls.PredictedDelayMultiplier", 1'000, 1, 1'000'000, std::round(Settings.VDiskPredictedDelayMultiplier * 1'000)); #undef ADD_ICB_CONTROL { diff --git a/ydb/core/blobstorage/ut_group/main.cpp b/ydb/core/blobstorage/ut_group/main.cpp index 4debcce43505..2dfada62548c 100644 --- a/ydb/core/blobstorage/ut_group/main.cpp +++ b/ydb/core/blobstorage/ut_group/main.cpp @@ -407,8 +407,17 @@ class TTestEnv { auto proxy = Counters->GetSubgroup("subsystem", "proxy"); TIntrusivePtr mon = MakeIntrusive(proxy, true); StoragePoolCounters = MakeIntrusive(proxy, TString(), NPDisk::DEVICE_TYPE_SSD); + TControlWrapper enablePutBatching(DefaultEnablePutBatching, false, true); + TControlWrapper enableVPatch(DefaultEnableVPatch, false, true); + TControlWrapper slowDiskThreshold(DefaultSlowDiskThreshold * 1000, 1, 1000000); + TControlWrapper predictedDelayMultiplier(DefaultPredictedDelayMultiplier * 1000, 1, 1000000); std::unique_ptr proxyActor{CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(Info), false, mon, - TIntrusivePtr(StoragePoolCounters), DefaultEnablePutBatching, DefaultEnableVPatch)}; + TIntrusivePtr(StoragePoolCounters), TBlobStorageProxyParameters{ + .EnablePutBatching = enablePutBatching, + .EnableVPatch = enableVPatch, + .SlowDiskThreshold = slowDiskThreshold, + .PredictedDelayMultiplier = predictedDelayMultiplier, + })}; const TActorId& actorId = runtime.Register(proxyActor.release(), TActorId(), 0, std::nullopt, 1); runtime.RegisterService(MakeBlobStorageProxyID(GroupId), actorId); } diff --git a/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp b/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp index ff14300f9b84..9fcea654e21a 100644 --- a/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp +++ b/ydb/core/blobstorage/vdisk/balance/balancing_actor.cpp @@ -157,7 +157,7 @@ namespace NBalancing { CreateQueuesForVDisks(*QueueActorMapPtr, SelfId(), GInfo, Ctx->VCtx, GInfo->GetVDisks(), Ctx->MonGroup.GetGroup(), queueClientId, NKikimrBlobStorage::EVDiskQueueId::GetAsyncRead, - "DisksBalancing", interconnectChannel); + "DisksBalancing", interconnectChannel, false); } void Handle(NActors::TEvents::TEvUndelivered::TPtr ev) { diff --git a/ydb/core/blobstorage/vdisk/common/vdisk_config.h b/ydb/core/blobstorage/vdisk/common/vdisk_config.h index 73b1d27daaa1..84f791ae2a48 100644 --- a/ydb/core/blobstorage/vdisk/common/vdisk_config.h +++ b/ydb/core/blobstorage/vdisk/common/vdisk_config.h @@ -219,6 +219,7 @@ namespace NKikimr { bool EnableVDiskCooldownTimeout; TControlWrapper EnableVPatch = true; TControlWrapper DefaultHugeGarbagePerMille; + bool UseActorSystemTimeInBSQueue = false; ///////////// COST METRICS SETTINGS //////////////// bool UseCostTracker = true; diff --git a/ydb/core/blobstorage/vdisk/common/vdisk_queues.h b/ydb/core/blobstorage/vdisk/common/vdisk_queues.h index 7f2c3e8af107..5ccbac0278c2 100644 --- a/ydb/core/blobstorage/vdisk/common/vdisk_queues.h +++ b/ydb/core/blobstorage/vdisk/common/vdisk_queues.h @@ -51,7 +51,7 @@ namespace NKikimr { const TIntrusivePtr<::NMonitoring::TDynamicCounters> &groupCounters, 
const NBackpressure::TQueueClientId &queueClientId, NKikimrBlobStorage::EVDiskQueueId vDiskQueueId, const TString &queueName, TInterconnectChannels::EInterconnectChannels interconnectChannel, - TWrapper wrapper = {}) + const bool useActorSystemTimeInBSQueue, TWrapper wrapper = {}) { for (auto &vdiskInfo : disks) { auto vdisk = GetVDiskID(vdiskInfo); @@ -62,7 +62,8 @@ namespace NKikimr { queue.reset(CreateVDiskBackpressureClient(gInfo, vdisk, vDiskQueueId, groupCounters, vCtx, queueClientId, queueName, interconnectChannel, vdiskActorId.NodeId() == parent.NodeId(), - TDuration::Minutes(1), flowRecord, NMonitoring::TCountableBase::EVisibility::Private)); + TDuration::Minutes(1), flowRecord, NMonitoring::TCountableBase::EVisibility::Private, + useActorSystemTimeInBSQueue)); TActorId serviceId = TActivationContext::Register(queue.release(), parent); EmplaceToContainer(cont, vdisk, wrapper.Wrap(std::move(serviceId))); } diff --git a/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp b/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp index 45f7f4250a02..4962f625fd04 100644 --- a/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp +++ b/ydb/core/blobstorage/vdisk/repl/blobstorage_repl.cpp @@ -207,7 +207,7 @@ namespace NKikimr { CreateQueuesForVDisks(*QueueActorMapPtr, SelfId(), ReplCtx->GInfo, ReplCtx->VCtx, ReplCtx->GInfo->GetVDisks(), ReplCtx->MonGroup.GetGroup(), replQueueClientId, NKikimrBlobStorage::EVDiskQueueId::GetAsyncRead, - "PeerRepl", replInterconnectChannel); + "PeerRepl", replInterconnectChannel, false); for (const auto& [vdiskId, vdiskActorId] : ReplCtx->VDiskCfg->BaseInfo.DonorDiskIds) { TIntrusivePtr flowRecord(new NBackpressure::TFlowRecord); diff --git a/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp b/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp index f1c0d7052dc6..5d274fbdf261 100644 --- a/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp +++ b/ydb/core/blobstorage/vdisk/scrub/blob_recovery_queue.cpp @@ -14,7 +14,7 @@ namespace NKikimr { Info->GetTotalVDisksNum() + Info->GetOrderNumber(VCtx->ShortSelfVDisk)); // distinct queue client id CreateQueuesForVDisks(Queues, SelfId(), Info, VCtx, Info->GetVDisks(), Counters, clientId, NKikimrBlobStorage::EVDiskQueueId::GetLowRead, "PeerScrub", - TInterconnectChannels::IC_BLOBSTORAGE_ASYNC_DATA, TQueueActorIdWrapper()); + TInterconnectChannels::IC_BLOBSTORAGE_ASYNC_DATA, false, TQueueActorIdWrapper()); } void TBlobRecoveryActor::StopQueues() { diff --git a/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp b/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp index c7a7ff970a4c..1feffca3aaa5 100644 --- a/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp +++ b/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp @@ -258,7 +258,8 @@ namespace NKikimr { VCtx->Top->GetOrderNumber(VCtx->ShortSelfVDisk)); CreateQueuesForVDisks(VPatchCtx->AsyncBlobQueues, SelfId(), GInfo, VCtx, GInfo->GetVDisks(), patchGroup, patchQueueClientId, NKikimrBlobStorage::EVDiskQueueId::PutAsyncBlob, - "PeerVPatch", TInterconnectChannels::IC_BLOBSTORAGE_ASYNC_DATA); + "PeerVPatch", TInterconnectChannels::IC_BLOBSTORAGE_ASYNC_DATA, + Config->UseActorSystemTimeInBSQueue); } } diff --git a/ydb/core/cms/json_proxy_proto.h b/ydb/core/cms/json_proxy_proto.h index 8079eb6971e2..642c28b7617e 100644 --- a/ydb/core/cms/json_proxy_proto.h +++ b/ydb/core/cms/json_proxy_proto.h @@ -80,6 +80,8 @@ class TJsonProxyProto : public TActorBootstrapped { return 
ReplyWithTypeDescription(*NKikimrConfig::TImmediateControlsConfig::TVDiskControls::descriptor(), ctx); else if (name == ".NKikimrConfig.TImmediateControlsConfig.TTabletControls") return ReplyWithTypeDescription(*NKikimrConfig::TImmediateControlsConfig::TTabletControls::descriptor(), ctx); + else if (name == ".NKikimrConfig.TImmediateControlsConfig.TDSProxyControls") + return ReplyWithTypeDescription(*NKikimrConfig::TImmediateControlsConfig::TDSProxyControls::descriptor(), ctx); } ctx.Send(RequestEvent->Sender, diff --git a/ydb/core/protos/config.proto b/ydb/core/protos/config.proto index a44ea1acb36b..da20031f1c31 100644 --- a/ydb/core/protos/config.proto +++ b/ydb/core/protos/config.proto @@ -1308,6 +1308,19 @@ message TImmediateControlsConfig { DefaultValue: 256 }]; } + message TDSProxyControls { + optional uint64 SlowDiskThreshold = 1 [(ControlOptions) = { + Description: "The minimum ratio of slowest and second slowest disks, required to accelerate, actual value is divided by 1000", + MinValue: 1, + MaxValue: 1000000, + DefaultValue: 2000 }]; + optional uint64 PredictedDelayMultiplier = 2 [(ControlOptions) = { + Description: "Predicted time of VDisk's response is multiplied by this value divided by 1000", + MinValue: 0, + MaxValue: 1000000, + DefaultValue: 1000 }]; + } + optional TDataShardControls DataShardControls = 1; optional TTxLimitControls TxLimitControls = 2; optional TCoordinatorControls CoordinatorControls = 3; @@ -1316,6 +1329,7 @@ message TImmediateControlsConfig { reserved 6; optional TVDiskControls VDiskControls = 7; optional TTabletControls TabletControls = 8; + optional TDSProxyControls DSProxyControls = 9; }; message TMeteringConfig { From e4f6ac979f66eb3127834150cc93fe7558406e66 Mon Sep 17 00:00:00 2001 From: Sergey Belyakov Date: Mon, 16 Sep 2024 17:33:36 +0300 Subject: [PATCH 07/13] Fix acceleration in mirror-3-dc groups (#7931) --- ydb/core/blobstorage/dsproxy/dsproxy.h | 11 +- .../dsproxy/dsproxy_blackboard.cpp | 46 +++++- .../blobstorage/dsproxy/dsproxy_blackboard.h | 11 +- ydb/core/blobstorage/dsproxy/dsproxy_get.cpp | 12 +- .../blobstorage/dsproxy/dsproxy_get_impl.cpp | 12 +- ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp | 2 + ydb/core/blobstorage/dsproxy/dsproxy_impl.h | 2 + ydb/core/blobstorage/dsproxy/dsproxy_put.cpp | 4 +- .../blobstorage/dsproxy/dsproxy_put_impl.cpp | 4 +- .../blobstorage/dsproxy/dsproxy_state.cpp | 1 + .../dsproxy/dsproxy_strategy_accelerate_put.h | 8 +- .../dsproxy_strategy_accelerate_put_m3dc.h | 78 +++++----- .../dsproxy/dsproxy_strategy_base.cpp | 83 ++++------- .../dsproxy/dsproxy_strategy_base.h | 12 +- .../dsproxy/dsproxy_strategy_get_m3dc_basic.h | 37 ++--- .../dsproxy_strategy_get_min_iops_block.h | 3 +- .../dsproxy/dsproxy_strategy_put_m3dc.h | 13 +- .../dsproxy/dsproxy_strategy_restore.h | 21 +-- .../nodewarden/node_warden_impl.cpp | 1 + .../blobstorage/nodewarden/node_warden_impl.h | 6 +- .../nodewarden/node_warden_proxy.cpp | 3 + .../ut_blobstorage/acceleration.cpp | 133 ++++++++++++------ .../blobstorage/ut_blobstorage/lib/common.h | 6 +- ydb/core/blobstorage/ut_blobstorage/lib/env.h | 3 + ydb/core/protos/config.proto | 32 ++++- 25 files changed, 304 insertions(+), 240 deletions(-) diff --git a/ydb/core/blobstorage/dsproxy/dsproxy.h b/ydb/core/blobstorage/dsproxy/dsproxy.h index 21a9ab43011c..dc1db49ace20 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy.h @@ -55,8 +55,9 @@ const ui32 MaskSizeBits = 32; constexpr bool DefaultEnablePutBatching = true; constexpr bool 
DefaultEnableVPatch = false; -constexpr float DefaultSlowDiskThreshold = 2; -constexpr float DefaultPredictedDelayMultiplier = 1; +constexpr double DefaultSlowDiskThreshold = 2; +constexpr double DefaultPredictedDelayMultiplier = 1; +constexpr ui32 DefaultMaxNumOfSlowDisks = 2; constexpr bool WithMovingPatchRequestToStaticNode = true; @@ -172,8 +173,9 @@ inline void SetExecutionRelay(IEventBase& ev, std::shared_ptr @@ -851,6 +853,7 @@ struct TBlobStorageProxyParameters { const TControlWrapper& EnableVPatch; const TControlWrapper& SlowDiskThreshold; const TControlWrapper& PredictedDelayMultiplier; + const TControlWrapper& MaxNumOfSlowDisks = TControlWrapper(DefaultMaxNumOfSlowDisks, 1, 2); }; IActor* CreateBlobStorageGroupProxyConfigured(TIntrusivePtr&& info, diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp index 1de899664b5c..c705c9a04e2a 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.cpp @@ -179,15 +179,16 @@ ui64 TBlobState::GetPredictedDelayNs(const TBlobStorageGroupInfo &info, TGroupQu void TBlobState::GetWorstPredictedDelaysNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, NKikimrBlobStorage::EVDiskQueueId queueId, TDiskDelayPredictions *outNWorst, - double multiplier) const { + const TAccelerationParams& accelerationParams) const { outNWorst->resize(Disks.size()); for (ui32 diskIdx = 0; diskIdx < Disks.size(); ++diskIdx) { + ui64 predictedDelayNs = GetPredictedDelayNs(info, groupQueues, diskIdx, queueId); (*outNWorst)[diskIdx] = { - static_cast(GetPredictedDelayNs(info, groupQueues, diskIdx, queueId) * multiplier), + static_cast(predictedDelayNs * accelerationParams.PredictedDelayMultiplier), diskIdx }; } - ui32 sortedPrefixSize = std::min(3u, (ui32)Disks.size()); + ui32 sortedPrefixSize = std::min(accelerationParams.MaxNumOfSlowDisks + 1, (ui32)Disks.size()); std::partial_sort(outNWorst->begin(), outNWorst->begin() + sortedPrefixSize, outNWorst->end()); } @@ -466,16 +467,18 @@ void TBlackboard::ReportPartMapStatus(const TLogoBlobID &id, ssize_t partMapInde void TBlackboard::GetWorstPredictedDelaysNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, NKikimrBlobStorage::EVDiskQueueId queueId, TDiskDelayPredictions *outNWorst, - double multiplier) const { + const TAccelerationParams& accelerationParams) const { ui32 totalVDisks = info.GetTotalVDisksNum(); outNWorst->resize(totalVDisks); for (ui32 orderNumber = 0; orderNumber < totalVDisks; ++orderNumber) { + ui64 predictedDelayNs = groupQueues.GetPredictedDelayNsByOrderNumber(orderNumber, queueId); (*outNWorst)[orderNumber] = { - static_cast(groupQueues.GetPredictedDelayNsByOrderNumber(orderNumber, queueId) * multiplier), + static_cast(predictedDelayNs * accelerationParams.PredictedDelayMultiplier), orderNumber }; } - std::partial_sort(outNWorst->begin(), outNWorst->begin() + std::min(3u, totalVDisks), outNWorst->end()); + ui32 sortedPrefixSize = std::min(accelerationParams.MaxNumOfSlowDisks + 1, totalVDisks); + std::partial_sort(outNWorst->begin(), outNWorst->begin() + sortedPrefixSize, outNWorst->end()); } void TBlackboard::RegisterBlobForPut(const TLogoBlobID& id, size_t blobIdx) { @@ -542,4 +545,35 @@ void TBlackboard::InvalidatePartStates(ui32 orderNumber) { } } +void TBlackboard::MarkSlowDisks(TBlobState& state, bool isPut, const TAccelerationParams& accelerationParams) { + // by default all disks are considered fast + for (TBlobState::TDisk& disk : state.Disks) { 
+ disk.IsSlow = false; + } + + ui32 maxNumSlow = accelerationParams.MaxNumOfSlowDisks; + if (Info->GetTotalVDisksNum() <= maxNumSlow) { + // all disks cannot be slow + return; + } + + TDiskDelayPredictions worstDisks; + state.GetWorstPredictedDelaysNs(*Info, *GroupQueues, + (isPut ? HandleClassToQueueId(PutHandleClass) : HandleClassToQueueId(GetHandleClass)), + &worstDisks, accelerationParams); + + ui64 slowThreshold = worstDisks[maxNumSlow].PredictedNs * accelerationParams.SlowDiskThreshold; + if (slowThreshold == 0) { + // invalid or non-initialized predicted ns, consider all disks not slow + return; + } + + for (ui32 idx = 0; idx < maxNumSlow; ++idx) { + if (worstDisks[idx].PredictedNs > slowThreshold) { + ui32 orderNumber = worstDisks[idx].DiskIdx; + state.Disks[orderNumber].IsSlow = true; + } + } +} + }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.h b/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.h index 1cf53be9675e..947dad80446c 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_blackboard.h @@ -98,10 +98,9 @@ struct TBlobState { ui32 diskIdxInSubring, NKikimrBlobStorage::EVDiskQueueId queueId) const; void GetWorstPredictedDelaysNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, NKikimrBlobStorage::EVDiskQueueId queueId, TDiskDelayPredictions *outNWorst, - double multipler = 1) const; + const TAccelerationParams& accelerationParams) const; TString ToString() const; bool HasWrittenQuorum(const TBlobStorageGroupInfo& info, const TBlobStorageGroupInfo::TGroupVDisks& expired) const; - static TString SituationToString(ESituation situation); }; @@ -165,7 +164,7 @@ class IStrategy { struct TBlackboard { enum EAccelerationMode { - AccelerationModeSkipOneSlowest, + AccelerationModeSkipNSlowest, AccelerationModeSkipMarked }; @@ -189,7 +188,7 @@ struct TBlackboard { NKikimrBlobStorage::EPutHandleClass putHandleClass, NKikimrBlobStorage::EGetHandleClass getHandleClass) : Info(info) , GroupQueues(groupQueues) - , AccelerationMode(AccelerationModeSkipOneSlowest) + , AccelerationMode(AccelerationModeSkipNSlowest) , PutHandleClass(putHandleClass) , GetHandleClass(getHandleClass) {} @@ -212,7 +211,7 @@ struct TBlackboard { void ReportPartMapStatus(const TLogoBlobID &id, ssize_t partMapIndex, ui32 responseIndex, NKikimrProto::EReplyStatus status); void GetWorstPredictedDelaysNs(const TBlobStorageGroupInfo &info, TGroupQueues &groupQueues, NKikimrBlobStorage::EVDiskQueueId queueId, TDiskDelayPredictions *outNWorst, - double multiplier = 1) const; + const TAccelerationParams& accelerationParams) const; TString ToString() const; void ChangeAll() { @@ -225,6 +224,8 @@ struct TBlackboard { void RegisterBlobForPut(const TLogoBlobID& id, size_t blobIdx); + void MarkSlowDisks(TBlobState& state, bool isPut, const TAccelerationParams& accelerationParams); + TBlobState& operator [](const TLogoBlobID& id); }; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp index 86542ed2b1d4..946a16e88260 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp @@ -314,9 +314,9 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActorsecond.GetWorstPredictedDelaysNs( - *Info, *Blackboard.GroupQueues, queueId, &worstDisks, - AccelerationParams.PredictedDelayMultiplier); + Blackboard.BlobStates.begin()->second.GetWorstPredictedDelaysNs(*Info, *Blackboard.GroupQueues, + queueId, &worstDisks, AccelerationParams); 
} else { - Blackboard.GetWorstPredictedDelaysNs( - *Info, *Blackboard.GroupQueues, queueId, &worstDisks, - AccelerationParams.PredictedDelayMultiplier); + Blackboard.GetWorstPredictedDelaysNs(*Info, *Blackboard.GroupQueues, queueId, &worstDisks, + AccelerationParams); } - return worstDisks[std::min(3u, (ui32)worstDisks.size() - 1)].PredictedNs; + return worstDisks[std::min(AccelerationParams.MaxNumOfSlowDisks, (ui32)worstDisks.size() - 1)].PredictedNs; } ui64 TGetImpl::GetTimeToAccelerateGetNs(TLogContext &logCtx) { diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp index da777823942c..339f0b5dfcc5 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp @@ -19,6 +19,7 @@ namespace NKikimr { , EnableVPatch(params.EnableVPatch) , SlowDiskThreshold(params.SlowDiskThreshold) , PredictedDelayMultiplier(params.PredictedDelayMultiplier) + , MaxNumOfSlowDisks(params.MaxNumOfSlowDisks) {} TBlobStorageGroupProxy::TBlobStorageGroupProxy(ui32 groupId, bool isEjected,TIntrusivePtr &nodeMon, @@ -32,6 +33,7 @@ namespace NKikimr { , EnableVPatch(params.EnableVPatch) , SlowDiskThreshold(params.SlowDiskThreshold) , PredictedDelayMultiplier(params.PredictedDelayMultiplier) + , MaxNumOfSlowDisks(params.MaxNumOfSlowDisks) {} IActor* CreateBlobStorageGroupEjectedProxy(ui32 groupId, TIntrusivePtr &nodeMon) { diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_impl.h b/ydb/core/blobstorage/dsproxy/dsproxy_impl.h index 1a282bfeb2ff..d4afbd620774 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_impl.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_impl.h @@ -119,8 +119,10 @@ class TBlobStorageGroupProxy : public TActorBootstrapped bool HasInvalidGroupId() const { return GroupId.GetRawId() == Max(); } void ProcessInitQueue(); + // Acceleration parameters TMemorizableControlWrapper SlowDiskThreshold; TMemorizableControlWrapper PredictedDelayMultiplier; + TMemorizableControlWrapper MaxNumOfSlowDisks; TAccelerationParams GetAccelerationParams(); diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp index fc9f65bce668..5197b3a6e41f 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp @@ -382,8 +382,8 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor 0 && WaitingVDiskCount <= 2 && RequestsSent > 1) { + if (!IsAccelerateScheduled && AccelerateRequestsSent < AccelerationParams.MaxNumOfSlowDisks) { + if (WaitingVDiskCount > 0 && WaitingVDiskCount <= AccelerationParams.MaxNumOfSlowDisks && RequestsSent > 1) { ui64 timeToAccelerateUs = Max(1, PutImpl.GetTimeToAccelerateNs(LogCtx) / 1000); if (RequestsPendingBeforeAcceleration == 1 && AccelerateRequestsSent == 1) { // if there is only one request pending, but first accelerate is unsuccessful, make a pause diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp index d3ce08c2feef..67e9a2bd6412 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_put_impl.cpp @@ -91,8 +91,8 @@ ui64 TPutImpl::GetTimeToAccelerateNs(TLogContext &logCtx) { // Find the n'th slowest disk TDiskDelayPredictions worstDisks; state.GetWorstPredictedDelaysNs(*Info, *Blackboard.GroupQueues, HandleClassToQueueId(Blackboard.PutHandleClass), - &worstDisks, AccelerationParams.PredictedDelayMultiplier); - nthWorstPredictedNsVec[idx++] = worstDisks[2].PredictedNs; + 
&worstDisks, AccelerationParams); + nthWorstPredictedNsVec[idx++] = worstDisks[AccelerationParams.MaxNumOfSlowDisks].PredictedNs; } return *MaxElement(nthWorstPredictedNsVec.begin(), nthWorstPredictedNsVec.end()); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp index 4b0b10450dfc..d0676f7c226a 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp @@ -327,6 +327,7 @@ namespace NKikimr { return TAccelerationParams{ .SlowDiskThreshold = .001f * SlowDiskThreshold.Update(TActivationContext::Now()), .PredictedDelayMultiplier = .001f * PredictedDelayMultiplier.Update(TActivationContext::Now()), + .MaxNumOfSlowDisks = (ui32)MaxNumOfSlowDisks.Update(TActivationContext::Now()), }; } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put.h index 4256081130b3..9e78e5c73248 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put.h @@ -15,22 +15,22 @@ class TAcceleratePutStrategy : public TStrategyBase { const TAccelerationParams& accelerationParams) override { Y_UNUSED(accelerationParams); // Find the unput part and disk - TStackVec badDiskIdxs; + ui32 badDisksMask = 0; for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { TBlobState::TDisk &disk = state.Disks[diskIdx]; for (size_t partIdx = 0; partIdx < disk.DiskParts.size(); ++partIdx) { TBlobState::TDiskPart &diskPart = disk.DiskParts[partIdx]; if (diskPart.Situation == TBlobState::ESituation::Sent) { - badDiskIdxs.push_back(diskIdx); + badDisksMask |= (1 << diskIdx); } } } - if (!badDiskIdxs.empty()) { + if (badDisksMask > 0) { // Mark the corresponding disks 'bad' // Prepare part layout if possible TBlobStorageGroupType::TPartLayout layout; - PreparePartLayout(state, info, &layout, badDiskIdxs); + PreparePartLayout(state, info, &layout, badDisksMask); TBlobStorageGroupType::TPartPlacement partPlacement; bool isCorrectable = info.Type.CorrectLayout(layout, partPlacement); diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put_m3dc.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put_m3dc.h index 69804e441002..806155f54d9e 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put_m3dc.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_accelerate_put_m3dc.h @@ -30,50 +30,56 @@ class TAcceleratePut3dcStrategy : public TStrategyBase { } EStrategyOutcome Process(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, - TBlackboard& /*blackboard*/, TGroupDiskRequests &groupDiskRequests, + TBlackboard& blackboard, TGroupDiskRequests &groupDiskRequests, const TAccelerationParams& accelerationParams) override { Y_UNUSED(accelerationParams); // Find the unput parts and disks - ui32 badDiskMask = 0; - for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { + bool unresponsiveDisk = false; + for (size_t diskIdx = 0; diskIdx < state.Disks.size() && !unresponsiveDisk; ++diskIdx) { TBlobState::TDisk &disk = state.Disks[diskIdx]; - for (size_t partIdx = 0; partIdx < disk.DiskParts.size(); ++partIdx) { - TBlobState::TDiskPart &diskPart = disk.DiskParts[partIdx]; + for (TBlobState::TDiskPart &diskPart : disk.DiskParts) { if (diskPart.Situation == TBlobState::ESituation::Sent) { - badDiskMask |= (1 << diskIdx); + unresponsiveDisk = true; + break; } } } - if (badDiskMask > 0) { - // Mark 
the 'bad' disk as the single slow disk - for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { - state.Disks[diskIdx].IsSlow = badDiskMask & (1 << diskIdx); - } - - // Prepare part placement if possible - TBlobStorageGroupType::TPartPlacement partPlacement; - bool degraded = false; - - // check if we are in degraded mode -- that means that we have one fully failed realm - TBlobStorageGroupInfo::TSubgroupVDisks success(&info.GetTopology()); - TBlobStorageGroupInfo::TSubgroupVDisks error(&info.GetTopology()); - Evaluate3dcSituation(state, NumFailRealms, NumFailDomainsPerFailRealm, info, true, success, error, degraded); - - // check for failure tolerance; we issue ERROR in case when it is not possible to achieve success condition in - // any way; also check if we have already finished writing replicas - const auto& checker = info.GetQuorumChecker(); - if (checker.CheckFailModelForSubgroup(error)) { - if (checker.CheckQuorumForSubgroup(success)) { - // OK - return EStrategyOutcome::DONE; - } - - // now check every realm and check if we have to issue some write requests to it - Prepare3dcPartPlacement(state, NumFailRealms, NumFailDomainsPerFailRealm, - PreferredReplicasPerRealm(degraded), true, partPlacement); - - if (IsPutNeeded(state, partPlacement)) { - PreparePutsForPartPlacement(logCtx, state, info, groupDiskRequests, partPlacement); + if (unresponsiveDisk) { + blackboard.MarkSlowDisks(state, true, accelerationParams); + + for (bool considerSlowAsError : {true, false}) { + // Prepare part placement if possible + TBlobStorageGroupType::TPartPlacement partPlacement; + bool degraded = false; + + // check if we are in degraded mode -- that means that we have one fully failed realm + TBlobStorageGroupInfo::TSubgroupVDisks success(&info.GetTopology()); + TBlobStorageGroupInfo::TSubgroupVDisks error(&info.GetTopology()); + Evaluate3dcSituation(state, NumFailRealms, NumFailDomainsPerFailRealm, info, considerSlowAsError, + success, error, degraded); + // check for failure tolerance; we issue ERROR in case when it is not possible to achieve success condition in + // any way; also check if we have already finished writing replicas + const auto& checker = info.GetQuorumChecker(); + if (checker.CheckFailModelForSubgroup(error)) { + if (checker.CheckQuorumForSubgroup(success)) { + // OK + return EStrategyOutcome::DONE; + } + + // now check every realm and check if we have to issue some write requests to it + bool fullPlacement; + Prepare3dcPartPlacement(state, NumFailRealms, NumFailDomainsPerFailRealm, + PreferredReplicasPerRealm(degraded), considerSlowAsError, true, partPlacement, fullPlacement); + + if (considerSlowAsError && !fullPlacement) { + // unable to place all parts to fast disks, retry + continue; + } + + if (IsPutNeeded(state, partPlacement)) { + PreparePutsForPartPlacement(logCtx, state, info, groupDiskRequests, partPlacement); + } + break; } } } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp index 690765f9d48f..f2b3dfc8b151 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.cpp @@ -194,7 +194,7 @@ void TStrategyBase::AddGetRequest(TLogContext &logCtx, TGroupDiskRequests &group } void TStrategyBase::PreparePartLayout(const TBlobState &state, const TBlobStorageGroupInfo &info, - TBlobStorageGroupType::TPartLayout *layout, const TStackVec& slowDiskIdxs) { + TBlobStorageGroupType::TPartLayout *layout, ui32 slowDiskSubgroupMask) { 
Y_ABORT_UNLESS(layout); const ui32 totalPartCount = info.Type.TotalPartCount(); const ui32 blobSubringSize = info.Type.BlobSubgroupSize(); @@ -216,7 +216,7 @@ void TStrategyBase::PreparePartLayout(const TBlobState &state, const TBlobStorag if (!isErrorDisk) { for (ui32 partIdx = beginPartIdx; partIdx < endPartIdx; ++partIdx) { TBlobState::ESituation partSituation = disk.DiskParts[partIdx].Situation; - bool isOnSlowDisk = (std::find(slowDiskIdxs.begin(), slowDiskIdxs.end(), diskIdx) != slowDiskIdxs.end()); + bool isOnSlowDisk = (slowDiskSubgroupMask & (1 << diskIdx)); if (partSituation == TBlobState::ESituation::Present || (!isOnSlowDisk && partSituation == TBlobState::ESituation::Sent)) { layout->VDiskPartMask[diskIdx] |= (1ul << partIdx); @@ -225,15 +225,7 @@ void TStrategyBase::PreparePartLayout(const TBlobState &state, const TBlobStorag } } } - if (slowDiskIdxs.empty()) { - layout->SlowVDiskMask = 0; - } else { - layout->SlowVDiskMask = 0; - for (ui32 slowDiskIdx : slowDiskIdxs) { - Y_DEBUG_ABORT_UNLESS(slowDiskIdx < sizeof(layout->SlowVDiskMask) * 8); - layout->SlowVDiskMask |= (1ull << slowDiskIdx); - } - } + layout->SlowVDiskMask = slowDiskSubgroupMask; } bool TStrategyBase::IsPutNeeded(const TBlobState &state, const TBlobStorageGroupType::TPartPlacement &partPlacement) { @@ -363,10 +355,10 @@ void TStrategyBase::Evaluate3dcSituation(const TBlobState &state, } } -void TStrategyBase::Prepare3dcPartPlacement(const TBlobState &state, - size_t numFailRealms, size_t numFailDomainsPerFailRealm, - ui8 preferredReplicasPerRealm, bool considerSlowAsError, - TBlobStorageGroupType::TPartPlacement &outPartPlacement) { +void TStrategyBase::Prepare3dcPartPlacement(const TBlobState& state, size_t numFailRealms, size_t numFailDomainsPerFailRealm, + ui8 preferredReplicasPerRealm, bool considerSlowAsError, bool replaceUnresponsive, + TBlobStorageGroupType::TPartPlacement& outPartPlacement, bool& fullPlacement) { + fullPlacement = true; for (size_t realm = 0; realm < numFailRealms; ++realm) { ui8 placed = 0; for (size_t domain = 0; placed < preferredReplicasPerRealm @@ -377,6 +369,10 @@ void TStrategyBase::Prepare3dcPartPlacement(const TBlobState &state, if (situation != TBlobState::ESituation::Error) { if (situation == TBlobState::ESituation::Present) { placed++; + } else if (situation == TBlobState::ESituation::Sent) { + if (!replaceUnresponsive) { + placed++; + } } else if (!considerSlowAsError || !disk.IsSlow) { if (situation != TBlobState::ESituation::Sent) { outPartPlacement.Records.emplace_back(subgroupIdx, realm); @@ -385,52 +381,29 @@ void TStrategyBase::Prepare3dcPartPlacement(const TBlobState &state, } } } + if (placed < preferredReplicasPerRealm) { + fullPlacement = false; + } } } -ui32 TStrategyBase::MakeSlowSubgroupDiskMask(TBlobState &state, const TBlobStorageGroupInfo &info, TBlackboard &blackboard, - bool isPut, const TAccelerationParams& accelerationParams) { - if (info.GetTotalVDisksNum() == 1) { - // when there is only one disk, we consider it not slow - return 0; - } - // Find the slowest disk +ui32 TStrategyBase::MakeSlowSubgroupDiskMask(TBlobState &state, TBlackboard &blackboard, bool isPut, + const TAccelerationParams& accelerationParams) { + // Find slow disks switch (blackboard.AccelerationMode) { - case TBlackboard::AccelerationModeSkipOneSlowest: { - TDiskDelayPredictions worstDisks; - state.GetWorstPredictedDelaysNs(info, *blackboard.GroupQueues, - (isPut ? 
HandleClassToQueueId(blackboard.PutHandleClass) : - HandleClassToQueueId(blackboard.GetHandleClass)), - &worstDisks, accelerationParams.PredictedDelayMultiplier); - - // Check if the slowest disk exceptionally slow, or just not very fast - ui32 slowDiskSubgroupMask = 0; - if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > worstDisks[1].PredictedNs * - accelerationParams.SlowDiskThreshold) { - slowDiskSubgroupMask = 1 << worstDisks[0].DiskIdx; - } - - // Mark single slow disk - for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { - state.Disks[diskIdx].IsSlow = false; - } - if (slowDiskSubgroupMask > 0) { - state.Disks[worstDisks[0].DiskIdx].IsSlow = true; - } - - return slowDiskSubgroupMask; - } - case TBlackboard::AccelerationModeSkipMarked: { - ui32 slowDiskSubgroupMask = 0; - for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { - if (state.Disks[diskIdx].IsSlow) { - slowDiskSubgroupMask |= 1 << diskIdx; - } - } - return slowDiskSubgroupMask; + case TBlackboard::AccelerationModeSkipNSlowest: + blackboard.MarkSlowDisks(state, isPut, accelerationParams); + break; + case TBlackboard::AccelerationModeSkipMarked: + break; + } + ui32 slowDiskSubgroupMask = 0; + for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { + if (state.Disks[diskIdx].IsSlow) { + slowDiskSubgroupMask |= 1 << diskIdx; } } - return 0; + return slowDiskSubgroupMask; } }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.h index ce03192fc93d..3e90c6641aa5 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_base.h @@ -34,7 +34,7 @@ class TStrategyBase : public IStrategy { void AddGetRequest(TLogContext &logCtx, TGroupDiskRequests &groupDiskRequests, TLogoBlobID &fullId, ui32 partIdx, TBlobState::TDisk &disk, TIntervalSet &intervalSet, const char *logMarker); void PreparePartLayout(const TBlobState &state, const TBlobStorageGroupInfo &info, - TBlobStorageGroupType::TPartLayout *layout, const TStackVec& slowDiskIdxs); + TBlobStorageGroupType::TPartLayout *layout, ui32 slowDiskSubgroupMask); bool IsPutNeeded(const TBlobState &state, const TBlobStorageGroupType::TPartPlacement &partPlacement); void PreparePutsForPartPlacement(TLogContext &logCtx, TBlobState &state, const TBlobStorageGroupInfo &info, TGroupDiskRequests &groupDiskRequests, @@ -47,14 +47,12 @@ class TStrategyBase : public IStrategy { TBlobStorageGroupInfo::TSubgroupVDisks &inOutSuccess, TBlobStorageGroupInfo::TSubgroupVDisks &inOutError, bool &outIsDegraded); - void Prepare3dcPartPlacement(const TBlobState &state, size_t numFailRealms, size_t numFailDomainsPerFailRealm, - ui8 preferredReplicasPerRealm, bool considerSlowAsError, - TBlobStorageGroupType::TPartPlacement &outPartPlacement); + void Prepare3dcPartPlacement(const TBlobState& state, size_t numFailRealms, size_t numFailDomainsPerFailRealm, + ui8 preferredReplicasPerRealm, bool considerSlowAsError, bool replaceUnresponsive, + TBlobStorageGroupType::TPartPlacement& outPartPlacement, bool& fullPlacement); // Sets IsSlow for the slow disk, resets for other disks. 
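+    // (with AccelerationModeSkipNSlowest up to MaxNumOfSlowDisks subgroup disks may be marked slow)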
// returns bit mask with 1 on positions of slow disks - ui32 MakeSlowSubgroupDiskMask(TBlobState &state, const TBlobStorageGroupInfo &info, TBlackboard &blackboard, bool isPut, - const TAccelerationParams& accelerationParams); + ui32 MakeSlowSubgroupDiskMask(TBlobState &state, TBlackboard &blackboard, bool isPut, const TAccelerationParams& accelerationParams); }; - }//NKikimr diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_basic.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_basic.h index a5782426c66d..c0e9c8178949 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_basic.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_m3dc_basic.h @@ -71,35 +71,14 @@ namespace NKikimr { // issue request for a specific disk; returns true if the request was issued and not yet completed, otherwise // false - if (info.GetTotalVDisksNum() > 1) { - // find the slowest disk and mark it - switch (blackboard.AccelerationMode) { - case TBlackboard::AccelerationModeSkipOneSlowest: { - TDiskDelayPredictions worstDisks; - state.GetWorstPredictedDelaysNs(info, *blackboard.GroupQueues, - HandleClassToQueueId(blackboard.GetHandleClass), - &worstDisks, accelerationParams.PredictedDelayMultiplier); - - // Check if the slowest disk exceptionally slow, or just not very fast - i32 slowDiskSubgroupIdx = -1; - if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > - worstDisks[1].PredictedNs * accelerationParams.SlowDiskThreshold) { - slowDiskSubgroupIdx = worstDisks[0].DiskIdx; - } - - // Mark single slow disk - for (size_t diskIdx = 0; diskIdx < state.Disks.size(); ++diskIdx) { - state.Disks[diskIdx].IsSlow = false; - } - if (slowDiskSubgroupIdx >= 0) { - state.Disks[slowDiskSubgroupIdx].IsSlow = true; - } - break; - } - case TBlackboard::AccelerationModeSkipMarked: - // The slowest disk is already marked! - break; - } + // mark slow disks + switch (blackboard.AccelerationMode) { + case TBlackboard::AccelerationModeSkipNSlowest: + blackboard.MarkSlowDisks(state, false, accelerationParams); + break; + case TBlackboard::AccelerationModeSkipMarked: + // Slow disks are already marked! 
+ break; } // create an array defining order in which we traverse the disks diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_block.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_block.h index 2d0efd8f8615..bf61ce78044f 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_block.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_get_min_iops_block.h @@ -312,8 +312,7 @@ class TMinIopsBlockStrategy : public TStrategyBase { // Try excluding the slow disk bool isDone = false; // TODO: Mark disk that does not answer when accelerating requests - ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, info, blackboard, false, - accelerationParams); + ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, blackboard, false, accelerationParams); if (slowDiskSubgroupMask >= 0) { TBlobStorageGroupInfo::EBlobState fastPessimisticState = TBlobStorageGroupInfo::EBS_DISINTEGRATED; TBlobStorageGroupInfo::EBlobState fastOptimisticState = TBlobStorageGroupInfo::EBS_DISINTEGRATED; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3dc.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3dc.h index e6e9a376f720..b7b52fe37422 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3dc.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_put_m3dc.h @@ -36,14 +36,14 @@ class TPut3dcStrategy : public TStrategyBase { TBlobStorageGroupType::TPartPlacement partPlacement; bool degraded = false; bool isDone = false; - ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, info, blackboard, true, accelerationParams); + ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, blackboard, true, accelerationParams); do { if (slowDiskSubgroupMask == 0) { break; // ignore this case } TBlobStorageGroupInfo::TSubgroupVDisks success(&info.GetTopology()); TBlobStorageGroupInfo::TSubgroupVDisks error(&info.GetTopology()); - Evaluate3dcSituation(state, NumFailRealms, NumFailDomainsPerFailRealm, info, true, success, error, degraded); + Evaluate3dcSituation(state, NumFailRealms, NumFailDomainsPerFailRealm, info, false, success, error, degraded); TBlobStorageGroupInfo::TSubgroupVDisks slow = TBlobStorageGroupInfo::TSubgroupVDisks::CreateFromMask( &info.GetTopology(), slowDiskSubgroupMask); if ((success | error) & slow) { @@ -61,9 +61,7 @@ class TPut3dcStrategy : public TStrategyBase { // now check every realm and check if we have to issue some write requests to it Prepare3dcPartPlacement(state, NumFailRealms, NumFailDomainsPerFailRealm, - PreferredReplicasPerRealm(degraded), - true, partPlacement); - isDone = true; + PreferredReplicasPerRealm(degraded), true, false, partPlacement, isDone); } } while (false); if (!isDone) { @@ -81,9 +79,10 @@ class TPut3dcStrategy : public TStrategyBase { } // now check every realm and check if we have to issue some write requests to it + partPlacement.Records.clear(); + bool fullPlacement; Prepare3dcPartPlacement(state, NumFailRealms, NumFailDomainsPerFailRealm, - PreferredReplicasPerRealm(degraded), - false, partPlacement); + PreferredReplicasPerRealm(degraded), false, false, partPlacement, fullPlacement); } if (IsPutNeeded(state, partPlacement)) { PreparePutsForPartPlacement(logCtx, state, info, groupDiskRequests, partPlacement); diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_restore.h b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_restore.h index 4a2026c9a86e..c31ed1e71b0a 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_strategy_restore.h +++ 
b/ydb/core/blobstorage/dsproxy/dsproxy_strategy_restore.h @@ -126,26 +126,13 @@ class TRestoreStrategy : public TStrategyBase { return *res; } - TStackVec slowDiskSubgroupIdxs; - if (info.GetTotalVDisksNum() > 1) { - // Find the slowest disk, if there are more than 1 - TDiskDelayPredictions worstDisks; - state.GetWorstPredictedDelaysNs(info, *blackboard.GroupQueues, - HandleClassToQueueId(blackboard.PutHandleClass), - &worstDisks, accelerationParams.PredictedDelayMultiplier); - - // Check if the slowest disk exceptionally slow, or just not very fast - if (worstDisks[1].PredictedNs > 0 && worstDisks[0].PredictedNs > worstDisks[1].PredictedNs * - accelerationParams.SlowDiskThreshold) { - slowDiskSubgroupIdxs.push_back(worstDisks[0].DiskIdx); - } - } + ui32 slowDiskSubgroupMask = MakeSlowSubgroupDiskMask(state, blackboard, true, accelerationParams); bool isDone = false; - if (!slowDiskSubgroupIdxs.empty()) { + if (slowDiskSubgroupMask != 0) { // If there is an exceptionally slow disk, try not touching it, mark isDone TBlobStorageGroupType::TPartLayout layout; - PreparePartLayout(state, info, &layout, slowDiskSubgroupIdxs); + PreparePartLayout(state, info, &layout, slowDiskSubgroupMask); TBlobStorageGroupType::TPartPlacement partPlacement; bool isCorrectable = info.Type.CorrectLayout(layout, partPlacement); @@ -159,7 +146,7 @@ class TRestoreStrategy : public TStrategyBase { if (!isDone) { // Fill in the part layout TBlobStorageGroupType::TPartLayout layout; - PreparePartLayout(state, info, &layout, {}); + PreparePartLayout(state, info, &layout, 0); TBlobStorageGroupType::TPartPlacement partPlacement; bool isCorrectable = info.Type.CorrectLayout(layout, partPlacement); Y_ABORT_UNLESS(isCorrectable); diff --git a/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp b/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp index fd1303c94477..904f2a2fa100 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp +++ b/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp @@ -196,6 +196,7 @@ void TNodeWarden::Bootstrap() { icb->RegisterSharedControl(SlowDiskThreshold, "DSProxyControls.SlowDiskThreshold"); icb->RegisterSharedControl(PredictedDelayMultiplier, "DSProxyControls.PredictedDelayMultiplier"); + icb->RegisterSharedControl(MaxNumOfSlowDisks, "DSProxyControls.MaxNumOfSlowDisks"); } // start replication broker diff --git a/ydb/core/blobstorage/nodewarden/node_warden_impl.h b/ydb/core/blobstorage/nodewarden/node_warden_impl.h index be7518c4dec0..a1816435b01e 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_impl.h +++ b/ydb/core/blobstorage/nodewarden/node_warden_impl.h @@ -148,6 +148,7 @@ namespace NKikimr::NStorage { TControlWrapper SlowDiskThreshold; TControlWrapper PredictedDelayMultiplier; + TControlWrapper MaxNumOfSlowDisks; public: struct TGroupRecord; @@ -172,8 +173,9 @@ namespace NKikimr::NStorage { TCostMetricsParameters{50}, TCostMetricsParameters{32}, }) - , SlowDiskThreshold(2000, 1, 1000000) - , PredictedDelayMultiplier(1000, 1, 1000) + , SlowDiskThreshold(2'000, 1, 1'000'000) + , PredictedDelayMultiplier(1'000, 1, 1000) + , MaxNumOfSlowDisks(2, 1, 2) { Y_ABORT_UNLESS(Cfg->BlobStorageConfig.GetServiceSet().AvailabilityDomainsSize() <= 1); AvailDomainId = 1; diff --git a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp index 37e051a42283..bb23b289bcc3 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp +++ b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp @@ -42,6 +42,7 @@ void 
TNodeWarden::StartLocalProxy(ui32 groupId) { .EnableVPatch = EnableVPatch, .SlowDiskThreshold = SlowDiskThreshold, .PredictedDelayMultiplier = PredictedDelayMultiplier, + .MaxNumOfSlowDisks = MaxNumOfSlowDisks, }), TMailboxType::ReadAsFilled, AppData()->SystemPoolId); [[fallthrough]]; case NKikimrBlobStorage::TGroupDecommitStatus::DONE: @@ -61,6 +62,7 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) { .EnableVPatch = EnableVPatch, .SlowDiskThreshold = SlowDiskThreshold, .PredictedDelayMultiplier = PredictedDelayMultiplier, + .MaxNumOfSlowDisks = MaxNumOfSlowDisks, } ) ); @@ -72,6 +74,7 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) { .EnableVPatch = EnableVPatch, .SlowDiskThreshold = SlowDiskThreshold, .PredictedDelayMultiplier = PredictedDelayMultiplier, + .MaxNumOfSlowDisks = MaxNumOfSlowDisks, })); } diff --git a/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp b/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp index 4e88bc051459..7af07d01efe3 100644 --- a/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp +++ b/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp @@ -139,7 +139,8 @@ Y_UNIT_TEST_SUITE(Acceleration) { }; struct TestCtx { - TestCtx(const TBlobStorageGroupType& erasure, float slowDiskThreshold, float delayMultiplier) + TestCtx(const TBlobStorageGroupType& erasure, float slowDiskThreshold, float delayMultiplier, + ui32 maxSlowCount = 2) : NodeCount(erasure.BlobSubgroupSize() + 1) , Erasure(erasure) , Env(new TEnvironmentSetup({ @@ -148,6 +149,7 @@ Y_UNIT_TEST_SUITE(Acceleration) { .LocationGenerator = [this](ui32 nodeId) { return LocationGenerator(nodeId); }, .SlowDiskThreshold = slowDiskThreshold, .VDiskPredictedDelayMultiplier = delayMultiplier, + .MaxNumOfSlowDisks = maxSlowCount, })) , VDiskDelayEmulator(new TVDiskDelayEmulator(Env)) {} @@ -242,31 +244,43 @@ Y_UNIT_TEST_SUITE(Acceleration) { void TestAcceleratePut(const TBlobStorageGroupType& erasure, ui32 slowDisksNum, NKikimrBlobStorage::EPutHandleClass handleClass, TDuration fastDelay, TDuration slowDelay, TDuration initDelay, TDuration waitTime, - float delayMultiplier) { + float delayMultiplier, ui32 maxSlowCount = 2) { ui32 initialRequests = 100; float slowDiskThreshold = 2; TDiskDelay fastDiskDelay = TDiskDelay(fastDelay); TDiskDelay slowDiskDelay = TDiskDelay(slowDelay); TDiskDelay initDiskDelay = TDiskDelay(initDelay); - for (ui32 fastDisksNum = 0; fastDisksNum < erasure.BlobSubgroupSize() - 2; ++fastDisksNum) { + ui32 requests = (erasure.GetErasure() == TBlobStorageGroupType::ErasureMirror3dc) ? 
3 : 6; + + for (ui32 fastDisksNum = 0; fastDisksNum < requests - 1; ++fastDisksNum) { Ctest << "fastDisksNum# " << fastDisksNum << Endl; - TestCtx ctx(erasure, slowDiskThreshold, delayMultiplier); + TestCtx ctx(erasure, slowDiskThreshold, delayMultiplier, maxSlowCount); ctx.VDiskDelayEmulator->DefaultDelay = initDiskDelay; ctx.Initialize(); + bool verboseHandlers = false; TString data = MakeData(1024); - auto put = [&](TLogoBlobID blobId) { + auto put = [&](TLogoBlobID blobId, bool timeout) { ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { - SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max()), handleClass); + TEvBlobStorage::TEvPut* ev = new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max()); + if (verboseHandlers) { + Ctest << TAppData::TimeProvider->Now() << " Send TEvPut# " << ev->ToString() << Endl; + } + SendToBSProxy(ctx.Edge, ctx.GroupId, ev, handleClass); }); auto res = ctx.Env->WaitForEdgeActorEvent( - ctx.Edge, false, TAppData::TimeProvider->Now() + waitTime); - UNIT_ASSERT_C(res, "fastDisksNum# " << fastDisksNum); - UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); + ctx.Edge, false, timeout ? (TAppData::TimeProvider->Now() + waitTime) : TInstant::Max()); + if (timeout) { + if (slowDisksNum <= maxSlowCount) { + UNIT_ASSERT_C(res, "fastDisksNum# " << fastDisksNum); + UNIT_ASSERT_VALUES_EQUAL(res->Get()->Status, NKikimrProto::OK); + } else { + UNIT_ASSERT_C(!res, "fastDisksNum# " << fastDisksNum); + } + } }; - bool verboseHandlers = false; ctx.VDiskDelayEmulator->AddHandler(TEvBlobStorage::TEvVPutResult::EventType, [&](std::unique_ptr& ev) { ui32 nodeId = ev->Sender.NodeId(); if (nodeId < ctx.NodeCount) { @@ -283,14 +297,14 @@ Y_UNIT_TEST_SUITE(Acceleration) { }); for (ui32 i = 0; i < initialRequests; ++i) { - put(TLogoBlobID(1, 1, 1, 1, data.size(), 123 + i)); + put(TLogoBlobID(1, 1, 1, 1, data.size(), 123 + i), false); } ctx.Env->Sim(slowDelay); std::deque delayByResponseOrder; for (ui32 i = 0; i < erasure.BlobSubgroupSize(); ++i) { - if (i >= fastDisksNum && i < fastDisksNum + slowDisksNum) { + if (i >= fastDisksNum && i < fastDisksNum + slowDisksNum) { delayByResponseOrder.push_back(slowDiskDelay); } else { delayByResponseOrder.push_back(fastDiskDelay); @@ -301,7 +315,8 @@ Y_UNIT_TEST_SUITE(Acceleration) { ctx.VDiskDelayEmulator->LogUnwrap = true; verboseHandlers = true; ADD_DSPROXY_MESSAGE_PRINTER(TEvBlobStorage::TEvVPut); - put(TLogoBlobID(1, 1, 1, 1, data.size(), 1)); + ADD_DSPROXY_MESSAGE_PRINTER(TEvBlobStorage::TEvPutResult); + put(TLogoBlobID(1, 1, 1, 1, data.size(), 1), true); } } @@ -309,16 +324,18 @@ Y_UNIT_TEST_SUITE(Acceleration) { void TestAccelerateGet(const TBlobStorageGroupType& erasure, ui32 slowDisksNum, NKikimrBlobStorage::EGetHandleClass handleClass, TDuration fastDelay, TDuration slowDelay, TDuration initDelay, TDuration waitTime, - float delayMultiplier) { + float delayMultiplier, ui32 maxSlowCount = 2) { ui32 initialRequests = 100; float slowDiskThreshold = 2; TDiskDelay fastDiskDelay = TDiskDelay(fastDelay); TDiskDelay slowDiskDelay = TDiskDelay(slowDelay); TDiskDelay initDiskDelay = TDiskDelay(initDelay); - for (ui32 fastDisksNum = 0; fastDisksNum < erasure.BlobSubgroupSize() - 2; ++fastDisksNum) { + ui32 requests = 3; + + for (ui32 fastDisksNum = 0; fastDisksNum < requests - 1; ++fastDisksNum) { Ctest << "fastDisksNum# " << fastDisksNum << Endl; - TestCtx ctx(erasure, slowDiskThreshold, delayMultiplier); + TestCtx ctx(erasure, slowDiskThreshold, delayMultiplier, maxSlowCount); 
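+            // every disk starts with the initial delay; fast/slow delays are reassigned by response order after the warm-up requests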
ctx.VDiskDelayEmulator->DefaultDelay = initDiskDelay; ctx.Initialize(); @@ -340,7 +357,7 @@ Y_UNIT_TEST_SUITE(Acceleration) { }); TString data = MakeData(1024); - auto putAndGet = [&](TLogoBlobID blobId) { + auto putAndGet = [&](TLogoBlobID blobId, bool timeout) { ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvPut(blobId, data, TInstant::Max())); }); @@ -350,14 +367,21 @@ Y_UNIT_TEST_SUITE(Acceleration) { ctx.Env->Runtime->WrapInActorContext(ctx.Edge, [&] { SendToBSProxy(ctx.Edge, ctx.GroupId, new TEvBlobStorage::TEvGet(blobId, 0, data.size(), TInstant::Max(), handleClass)); }); - auto getRes = ctx.Env->WaitForEdgeActorEvent(ctx.Edge, false, TAppData::TimeProvider->Now() + waitTime); - UNIT_ASSERT_C(getRes, "fastDisksNum# " << fastDisksNum); - UNIT_ASSERT_VALUES_EQUAL(getRes->Get()->Status, NKikimrProto::OK); - UNIT_ASSERT_VALUES_EQUAL(getRes->Get()->Responses[0].Status, NKikimrProto::OK); + auto getRes = ctx.Env->WaitForEdgeActorEvent(ctx.Edge, false, + timeout ? (TAppData::TimeProvider->Now() + waitTime) : TInstant::Max()); + if (timeout) { + if (slowDisksNum <= maxSlowCount) { + UNIT_ASSERT_C(getRes, "fastDisksNum# " << fastDisksNum); + UNIT_ASSERT_VALUES_EQUAL(getRes->Get()->Status, NKikimrProto::OK); + UNIT_ASSERT_VALUES_EQUAL(getRes->Get()->Responses[0].Status, NKikimrProto::OK); + } else { + UNIT_ASSERT_C(!getRes, "fastDisksNum# " << fastDisksNum); + } + } }; for (ui32 i = 0; i < initialRequests; ++i) { - putAndGet(TLogoBlobID(1, 1, 1, 1, data.size(), 123 + i)); + putAndGet(TLogoBlobID(1, 1, 1, 1, data.size(), 123 + i), false); } ctx.Env->Sim(slowDelay); @@ -374,7 +398,8 @@ Y_UNIT_TEST_SUITE(Acceleration) { ctx.VDiskDelayEmulator->LogUnwrap = true; verboseHandlers = true; ADD_DSPROXY_MESSAGE_PRINTER(TEvBlobStorage::TEvVGet); - putAndGet(TLogoBlobID(1, 1, 1, 1, data.size(), 2)); + ADD_DSPROXY_MESSAGE_PRINTER(TEvBlobStorage::TEvGetResult); + putAndGet(TLogoBlobID(1, 1, 1, 1, data.size(), 2), true); } } @@ -424,7 +449,7 @@ Y_UNIT_TEST_SUITE(Acceleration) { } void TestThreshold(const TBlobStorageGroupType& erasure, ui32 slowDisks, bool delayPuts, bool delayGets, - TTestThresholdRequestSender sendRequests) { + TTestThresholdRequestSender sendRequests, float maxRatio) { float delayMultiplier = 1; float slowDiskThreshold = 1.2; TDiskDelay fastDiskDelay = TDiskDelay(TDuration::Seconds(0.1), 10, TDuration::Seconds(1), 1, "fast"); @@ -437,7 +462,7 @@ Y_UNIT_TEST_SUITE(Acceleration) { ui32 groupSize = erasure.BlobSubgroupSize(); std::vector nodeIsSlow(groupSize, true); - std::vector vputsByNode(groupSize, 0); + std::vector vrequestsByNode(groupSize, 0); for (ui32 i = 0; i < groupSize; ++i) { bool isSlow = (i % 3 == 0 && i / 3 < slowDisks); @@ -457,7 +482,7 @@ Y_UNIT_TEST_SUITE(Acceleration) { TDuration delay = ctx.VDiskDelayEmulator->DelayMsg(ev); Ctest << TAppData::TimeProvider->Now() << " TEvVPutResult: vdiskId# " << vdiskId.ToString() << " partId# " << partId.ToString() << " nodeId# " << nodeId << ", delay " << delay << Endl; - ++vputsByNode[nodeId - 1]; + ++vrequestsByNode[nodeId - 1]; return false; } return true; @@ -475,7 +500,7 @@ Y_UNIT_TEST_SUITE(Acceleration) { TDuration delay = ctx.VDiskDelayEmulator->DelayMsg(ev); Ctest << TAppData::TimeProvider->Now() << " TEvVGetResult: vdiskId# " << vdiskId.ToString() << " partId# " << partId.ToString() << " nodeId# " << nodeId << ", delay " << delay << Endl; - ++vputsByNode[nodeId - 1]; + ++vrequestsByNode[nodeId - 1]; return false; } return true; @@ -492,15 +517,15 @@ 
Y_UNIT_TEST_SUITE(Acceleration) { TStringStream str; - str << "VPUTS BY NODE: "; + str << "VRequests by node: "; for (ui32 i = 0; i < groupSize; ++i) { - str << "{ nodeId# " << i << " isSlow# " << nodeIsSlow[i] << ' ' << vputsByNode[i] << "}, "; + str << "{ nodeId# " << i << " isSlow# " << nodeIsSlow[i] << ' ' << vrequestsByNode[i] << "}, "; if (nodeIsSlow[i]) { ++slowNodesCount; - slowNodesRequests += vputsByNode[i]; + slowNodesRequests += vrequestsByNode[i]; } else { ++fastNodesCount; - fastNodesRequests += vputsByNode[i]; + fastNodesRequests += vrequestsByNode[i]; } } Ctest << str.Str() << Endl; @@ -508,15 +533,17 @@ Y_UNIT_TEST_SUITE(Acceleration) { double slowNodeRequestsAvg = 1. * slowNodesRequests / slowNodesCount; double fastNodeRequestsAvg = 1. * fastNodesRequests / fastNodesCount; - UNIT_ASSERT_LE_C(slowNodeRequestsAvg, fastNodeRequestsAvg / 3, str.Str()); + double ratio = fastNodeRequestsAvg / slowNodeRequestsAvg; + Ctest << "Fast to slow ratio# " << ratio << Endl; + UNIT_ASSERT_GE_C(ratio, maxRatio, "ratio# " << ratio << " " << str.Str()); } void TestThresholdPut(const TBlobStorageGroupType& erasure, ui32 slowDisks) { - TestThreshold(erasure, slowDisks, true, false, TestThresholdSendPutRequests); + TestThreshold(erasure, slowDisks, true, false, TestThresholdSendPutRequests, 5); } void TestThresholdGet(const TBlobStorageGroupType& erasure, ui32 slowDisks) { - TestThreshold(erasure, slowDisks, false, true, TestThresholdSendGetRequests); + TestThreshold(erasure, slowDisks, false, true, TestThresholdSendGetRequests, 1.5); } void TestDelayMultiplierPut(const TBlobStorageGroupType& erasure, ui32 slowDisks) { @@ -526,8 +553,17 @@ Y_UNIT_TEST_SUITE(Acceleration) { void TestDelayMultiplierGet(const TBlobStorageGroupType& erasure, ui32 slowDisks) { TestAccelerateGet(erasure, slowDisks, NKikimrBlobStorage::AsyncRead, TDuration::Seconds(0.9), - TDuration::Seconds(2 - ), TDuration::Seconds(1), TDuration::Seconds(1.95), 0.8); + TDuration::Seconds(2), TDuration::Seconds(1), TDuration::Seconds(1.95), 0.8); + } + + void TestMaxNumOfSlowDisksPut(const TBlobStorageGroupType& erasure, ui32 slowDisks) { + TestAcceleratePut(erasure, slowDisks, NKikimrBlobStorage::AsyncBlob, TDuration::Seconds(1), + TDuration::Seconds(5), TDuration::Seconds(1), TDuration::Seconds(4), 1, 1); + } + + void TestMaxNumOfSlowDisksGet(const TBlobStorageGroupType& erasure, ui32 slowDisks) { + TestAccelerateGet(erasure, slowDisks, NKikimrBlobStorage::AsyncRead, TDuration::Seconds(1), + TDuration::Seconds(5), TDuration::Seconds(1), TDuration::Seconds(4), 1, 1); } #define TEST_ACCELERATE(erasure, method, handleClass, slowDisks) \ @@ -536,6 +572,8 @@ Y_UNIT_TEST_SUITE(Acceleration) { TDuration::Seconds(1), TDuration::Seconds(5), TDuration::Seconds(1), TDuration::Seconds(4), 1); \ } + // TODO fix Acceleration in mirror-3-of-4 + TEST_ACCELERATE(Mirror3dc, Put, AsyncBlob, 1); // TEST_ACCELERATE(Mirror3of4, Put, AsyncBlob, 1); TEST_ACCELERATE(4Plus2Block, Put, AsyncBlob, 1); @@ -557,19 +595,17 @@ Y_UNIT_TEST_SUITE(Acceleration) { Test##param##method(TBlobStorageGroupType::Erasure##erasure, slowDisks); \ } -// TEST_ACCELERATE_PARAMS(Threshold, Put, Mirror3dc, 1); + TEST_ACCELERATE_PARAMS(Threshold, Put, Mirror3dc, 1); TEST_ACCELERATE_PARAMS(Threshold, Put, 4Plus2Block, 1); -// TEST_ACCELERATE_PARAMS(Threshold, Put, Mirror3dc, 2); -// TEST_ACCELERATE_PARAMS(Threshold, Put, 4Plus2Block, 2); + TEST_ACCELERATE_PARAMS(Threshold, Put, Mirror3dc, 2); + TEST_ACCELERATE_PARAMS(Threshold, Put, 4Plus2Block, 2); -// 
TEST_ACCELERATE_PARAMS(Threshold, Get, Mirror3dc, 1); + TEST_ACCELERATE_PARAMS(Threshold, Get, Mirror3dc, 1); TEST_ACCELERATE_PARAMS(Threshold, Get, 4Plus2Block, 1); -// TEST_ACCELERATE_PARAMS(Threshold, Get, Mirror3dc, 2); -// TEST_ACCELERATE_PARAMS(Threshold, Get, 4Plus2Block, 2); - - // TODO(serg-belyakov): fix all muted tests + TEST_ACCELERATE_PARAMS(Threshold, Get, Mirror3dc, 2); + TEST_ACCELERATE_PARAMS(Threshold, Get, 4Plus2Block, 2); TEST_ACCELERATE_PARAMS(DelayMultiplier, Put, Mirror3dc, 1); TEST_ACCELERATE_PARAMS(DelayMultiplier, Put, 4Plus2Block, 1); @@ -583,6 +619,15 @@ Y_UNIT_TEST_SUITE(Acceleration) { TEST_ACCELERATE_PARAMS(DelayMultiplier, Get, Mirror3dc, 2); TEST_ACCELERATE_PARAMS(DelayMultiplier, Get, 4Plus2Block, 2); + TEST_ACCELERATE_PARAMS(MaxNumOfSlowDisks, Get, Mirror3dc, 1); + TEST_ACCELERATE_PARAMS(MaxNumOfSlowDisks, Get, 4Plus2Block, 1); + + TEST_ACCELERATE_PARAMS(MaxNumOfSlowDisks, Put, Mirror3dc, 1); + TEST_ACCELERATE_PARAMS(MaxNumOfSlowDisks, Put, 4Plus2Block, 1); + + TEST_ACCELERATE_PARAMS(MaxNumOfSlowDisks, Put, Mirror3dc, 2); + TEST_ACCELERATE_PARAMS(MaxNumOfSlowDisks, Put, 4Plus2Block, 2); + #undef TEST_ACCELERATE #undef TEST_ACCELERATE_PARAMS #undef PRINT_DSPROXY_MESSAGE diff --git a/ydb/core/blobstorage/ut_blobstorage/lib/common.h b/ydb/core/blobstorage/ut_blobstorage/lib/common.h index 462d6f67861e..3ed6d98cbc61 100644 --- a/ydb/core/blobstorage/ut_blobstorage/lib/common.h +++ b/ydb/core/blobstorage/ut_blobstorage/lib/common.h @@ -62,7 +62,7 @@ class TWeightedRandom { public: TWeightedRandom(ui64 seed = 0) : PrefixSum({ 0 }) - , Mt64(seed) + , Mt64(new TMersenne(seed)) {} TWeightedRandom(const TWeightedRandom&) = default; @@ -77,7 +77,7 @@ class TWeightedRandom { T GetRandom() { Y_ABORT_UNLESS(WeightSum() != 0); - return Get(Mt64() % WeightSum()); + return Get((*Mt64)() % WeightSum()); } T Get(ui64 w) { @@ -95,5 +95,5 @@ class TWeightedRandom { private: std::vector Values; std::vector PrefixSum; - TMersenne Mt64; + std::shared_ptr> Mt64; }; diff --git a/ydb/core/blobstorage/ut_blobstorage/lib/env.h b/ydb/core/blobstorage/ut_blobstorage/lib/env.h index c3923969aa39..9f49cfcd0791 100644 --- a/ydb/core/blobstorage/ut_blobstorage/lib/env.h +++ b/ydb/core/blobstorage/ut_blobstorage/lib/env.h @@ -50,6 +50,7 @@ struct TEnvironmentSetup { const bool UseFakeConfigDispatcher = false; const float SlowDiskThreshold = 2; const float VDiskPredictedDelayMultiplier = 1; + const ui32 MaxNumOfSlowDisks = 2; }; const TSettings Settings; @@ -402,6 +403,8 @@ struct TEnvironmentSetup { ADD_ICB_CONTROL("DSProxyControls.SlowDiskThreshold", 2'000, 1, 1'000'000, std::round(Settings.SlowDiskThreshold * 1'000)); ADD_ICB_CONTROL("DSProxyControls.PredictedDelayMultiplier", 1'000, 1, 1'000'000, std::round(Settings.VDiskPredictedDelayMultiplier * 1'000)); + ADD_ICB_CONTROL("DSProxyControls.MaxNumOfSlowDisks", 2, 1, 2, Settings.MaxNumOfSlowDisks); + #undef ADD_ICB_CONTROL { diff --git a/ydb/core/protos/config.proto b/ydb/core/protos/config.proto index da20031f1c31..e6dd9a65fd6a 100644 --- a/ydb/core/protos/config.proto +++ b/ydb/core/protos/config.proto @@ -1310,15 +1310,43 @@ message TImmediateControlsConfig { message TDSProxyControls { optional uint64 SlowDiskThreshold = 1 [(ControlOptions) = { - Description: "The minimum ratio of slowest and second slowest disks, required to accelerate, actual value is divided by 1000", + Description: "The minimum ratio of slowest and second slowest disks, required to accelerate, promille", MinValue: 1, MaxValue: 1000000, DefaultValue: 2000 }]; 
optional uint64 PredictedDelayMultiplier = 2 [(ControlOptions) = { - Description: "Predicted time of VDisk's response is multiplied by this value divided by 1000", + Description: "Predicted time of VDisk's response multiplier, promille", MinValue: 0, MaxValue: 1000000, DefaultValue: 1000 }]; + reserved 3; + reserved 4; + optional uint64 MaxNumOfSlowDisks = 5 [(ControlOptions) = { + Description: "Maximum number of slow disks, which DSProxy can skip with Accelerations", + MinValue: 1, + MaxValue: 2, + DefaultValue: 2 }]; + } + + message TPDiskControls { + optional uint64 MaxCommonLogChunksHDD = 1 [(ControlOptions) = { + Description: "Regulate the maximum of log chunks on the PDisk, setting for HDD", + MinValue: 1, + MaxValue: 1000000, + DefaultValue: 200 }]; + optional uint64 MaxCommonLogChunksSSD = 2 [(ControlOptions) = { + Description: "Regulate the maximum of log chunks on the PDisk, setting for SSD, NVME", + MinValue: 1, + MaxValue: 1000000, + DefaultValue: 200 }]; + } + + message TBlobStorageControllerControls { + optional uint64 EnableSelfHealWithDegraded = 1 [(ControlOptions) = { + Description: "Should SelfHeal automatically process groups that are in DEGRADED status (one step from nonworking)", + MinValue: 0, + MaxValue: 1, + DefaultValue: 0 }]; } optional TDataShardControls DataShardControls = 1; From 42d4c2379c5f9a75c1e090fecfa949c172a756aa Mon Sep 17 00:00:00 2001 From: Sergey Belyakov Date: Tue, 19 Nov 2024 10:03:37 +0000 Subject: [PATCH 08/13] Fix merge errors --- ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp index bb23b289bcc3..953821ea942d 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp +++ b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp @@ -58,6 +58,7 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) { // create proxy with configuration proxy.reset(CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(info), false, DsProxyNodeMon, getCounters(info), TBlobStorageProxyParameters{ + .UseActorSystemTimeInBSQueue = Cfg->UseActorSystemTimeInBSQueue, .EnablePutBatching = EnablePutBatching, .EnableVPatch = EnableVPatch, .SlowDiskThreshold = SlowDiskThreshold, @@ -70,6 +71,7 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) { } else { // create proxy without configuration proxy.reset(CreateBlobStorageGroupProxyUnconfigured(groupId, DsProxyNodeMon, TBlobStorageProxyParameters{ + .UseActorSystemTimeInBSQueue = Cfg->UseActorSystemTimeInBSQueue, .EnablePutBatching = EnablePutBatching, .EnableVPatch = EnableVPatch, .SlowDiskThreshold = SlowDiskThreshold, From 550ba0c5da8bcbae81f63de869f23c0a00ee233d Mon Sep 17 00:00:00 2001 From: Sergey Belyakov Date: Mon, 18 Nov 2024 14:25:37 +0300 Subject: [PATCH 09/13] Separate acceleration ICB settings for HDD and SSD (#11590) --- ydb/core/blobstorage/common/defs.h | 4 ++ .../common/immediate_control_defaults.cpp | 14 ++++++ .../common/immediate_control_defaults.h | 18 ++++++++ ydb/core/blobstorage/common/ya.make | 11 +++++ ydb/core/blobstorage/dsproxy/dsproxy.h | 32 ++++++++------ ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp | 20 +++------ ydb/core/blobstorage/dsproxy/dsproxy_impl.h | 8 +--- .../blobstorage/dsproxy/dsproxy_request.cpp | 9 ++-- .../blobstorage/dsproxy/dsproxy_state.cpp | 22 ++++++++-- .../dsproxy/ut/dsproxy_env_mock_ut.h | 10 ++--- .../blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp | 10 ++--- .../dsproxy_fault_tolerance_ut_runtime.h | 10 ++--- 
ydb/core/blobstorage/dsproxy/ya.make | 1 + .../nodewarden/node_warden_impl.cpp | 9 ++++ .../blobstorage/nodewarden/node_warden_impl.h | 20 +++++++-- .../nodewarden/node_warden_proxy.cpp | 43 ++++++++++++------- ydb/core/blobstorage/nodewarden/ya.make | 1 + ydb/core/blobstorage/ut_blobstorage/lib/env.h | 6 +++ ydb/core/blobstorage/ut_group/main.cpp | 10 ++--- ydb/core/protos/config.proto | 32 ++++++++++++++ 20 files changed, 209 insertions(+), 81 deletions(-) create mode 100644 ydb/core/blobstorage/common/defs.h create mode 100644 ydb/core/blobstorage/common/immediate_control_defaults.cpp create mode 100644 ydb/core/blobstorage/common/immediate_control_defaults.h create mode 100644 ydb/core/blobstorage/common/ya.make diff --git a/ydb/core/blobstorage/common/defs.h b/ydb/core/blobstorage/common/defs.h new file mode 100644 index 000000000000..fe09e00a103d --- /dev/null +++ b/ydb/core/blobstorage/common/defs.h @@ -0,0 +1,4 @@ +#pragma once + +#include +#include diff --git a/ydb/core/blobstorage/common/immediate_control_defaults.cpp b/ydb/core/blobstorage/common/immediate_control_defaults.cpp new file mode 100644 index 000000000000..03749ff97bed --- /dev/null +++ b/ydb/core/blobstorage/common/immediate_control_defaults.cpp @@ -0,0 +1,14 @@ +#include "immediate_control_defaults.h" + +namespace NKikimr { + +TControlWrapper SlowDiskThresholdDefaultControl = + TControlWrapper(std::round(DefaultSlowDiskThreshold * 1000), 1, 1'000'000); + +TControlWrapper PredictedDelayMultiplierDefaultControl = + TControlWrapper(std::round(DefaultPredictedDelayMultiplier * 1000), 0, 1'000'000); + +TControlWrapper MaxNumOfSlowDisksDefaultControl = + TControlWrapper(DefaultMaxNumOfSlowDisks, 1, 2); + +} // namespace NKikimr diff --git a/ydb/core/blobstorage/common/immediate_control_defaults.h b/ydb/core/blobstorage/common/immediate_control_defaults.h new file mode 100644 index 000000000000..98377ffb84cf --- /dev/null +++ b/ydb/core/blobstorage/common/immediate_control_defaults.h @@ -0,0 +1,18 @@ +#pragma once + +#include "defs.h" +#include + +namespace NKikimr { + +constexpr bool DefaultEnablePutBatching = true; +constexpr bool DefaultEnableVPatch = false; + +constexpr float DefaultSlowDiskThreshold = 2; +constexpr float DefaultPredictedDelayMultiplier = 1; +constexpr ui32 DefaultMaxNumOfSlowDisks = 2; + +extern TControlWrapper SlowDiskThresholdDefaultControl; +extern TControlWrapper PredictedDelayMultiplierDefaultControl; +extern TControlWrapper MaxNumOfSlowDisksDefaultControl; +} diff --git a/ydb/core/blobstorage/common/ya.make b/ydb/core/blobstorage/common/ya.make new file mode 100644 index 000000000000..a257ed4e8607 --- /dev/null +++ b/ydb/core/blobstorage/common/ya.make @@ -0,0 +1,11 @@ +LIBRARY() + +PEERDIR( + ydb/core/base +) + +SRCS( + immediate_control_defaults.cpp +) + +END() diff --git a/ydb/core/blobstorage/dsproxy/dsproxy.h b/ydb/core/blobstorage/dsproxy/dsproxy.h index dc1db49ace20..da1050426b8a 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -52,13 +53,6 @@ const ui32 MaxRequestSize = 1000; const ui32 MaskSizeBits = 32; -constexpr bool DefaultEnablePutBatching = true; -constexpr bool DefaultEnableVPatch = false; - -constexpr double DefaultSlowDiskThreshold = 2; -constexpr double DefaultPredictedDelayMultiplier = 1; -constexpr ui32 DefaultMaxNumOfSlowDisks = 2; - constexpr bool WithMovingPatchRequestToStaticNode = true; 
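// Note: the Default* acceleration constants deleted just above are not gone; this patch re-creates them in
// ydb/core/blobstorage/common/immediate_control_defaults.h together with the shared *DefaultControl
// TControlWrapper instances, so dsproxy, nodewarden and the unit tests pick up one common set of defaults.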
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -846,14 +840,28 @@ IActor* CreateBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParam IActor* CreateBlobStorageGroupEjectedProxy(ui32 groupId, TIntrusivePtr &nodeMon); +struct TBlobStorageProxyControlWrappers { + TMemorizableControlWrapper EnablePutBatching; + TMemorizableControlWrapper EnableVPatch; + +#define DEVICE_TYPE_SEPECIFIC_MEMORIZABLE_CONTROLS(prefix) \ + TMemorizableControlWrapper prefix = prefix##DefaultControl; \ + TMemorizableControlWrapper prefix##HDD = prefix##DefaultControl; \ + TMemorizableControlWrapper prefix##SSD = prefix##DefaultControl + + // Acceleration parameters + DEVICE_TYPE_SEPECIFIC_MEMORIZABLE_CONTROLS(SlowDiskThreshold); + DEVICE_TYPE_SEPECIFIC_MEMORIZABLE_CONTROLS(PredictedDelayMultiplier); + DEVICE_TYPE_SEPECIFIC_MEMORIZABLE_CONTROLS(MaxNumOfSlowDisks); + +#undef DEVICE_TYPE_SEPECIFIC_MEMORIZABLE_CONTROLS + +}; + struct TBlobStorageProxyParameters { bool UseActorSystemTimeInBSQueue = false; - const TControlWrapper& EnablePutBatching; - const TControlWrapper& EnableVPatch; - const TControlWrapper& SlowDiskThreshold; - const TControlWrapper& PredictedDelayMultiplier; - const TControlWrapper& MaxNumOfSlowDisks = TControlWrapper(DefaultMaxNumOfSlowDisks, 1, 2); + TBlobStorageProxyControlWrappers Controls; }; IActor* CreateBlobStorageGroupProxyConfigured(TIntrusivePtr&& info, diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp index 339f0b5dfcc5..9975916fdcdb 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_impl.cpp @@ -15,11 +15,7 @@ namespace NKikimr { , IsEjected(false) , ForceWaitAllDrives(forceWaitAllDrives) , UseActorSystemTimeInBSQueue(params.UseActorSystemTimeInBSQueue) - , EnablePutBatching(params.EnablePutBatching) - , EnableVPatch(params.EnableVPatch) - , SlowDiskThreshold(params.SlowDiskThreshold) - , PredictedDelayMultiplier(params.PredictedDelayMultiplier) - , MaxNumOfSlowDisks(params.MaxNumOfSlowDisks) + , Controls(std::move(params.Controls)) {} TBlobStorageGroupProxy::TBlobStorageGroupProxy(ui32 groupId, bool isEjected,TIntrusivePtr &nodeMon, @@ -29,20 +25,16 @@ namespace NKikimr { , IsEjected(isEjected) , ForceWaitAllDrives(false) , UseActorSystemTimeInBSQueue(params.UseActorSystemTimeInBSQueue) - , EnablePutBatching(params.EnablePutBatching) - , EnableVPatch(params.EnableVPatch) - , SlowDiskThreshold(params.SlowDiskThreshold) - , PredictedDelayMultiplier(params.PredictedDelayMultiplier) - , MaxNumOfSlowDisks(params.MaxNumOfSlowDisks) + , Controls(std::move(params.Controls)) {} IActor* CreateBlobStorageGroupEjectedProxy(ui32 groupId, TIntrusivePtr &nodeMon) { return new TBlobStorageGroupProxy(groupId, true, nodeMon, TBlobStorageProxyParameters{ - .EnablePutBatching = TControlWrapper(false, false, true), - .EnableVPatch = TControlWrapper(false, false, true), - .SlowDiskThreshold = TControlWrapper(2000, 1, 1000000), - .PredictedDelayMultiplier = TControlWrapper(1000, 1, 1000000), + .Controls = TBlobStorageProxyControlWrappers{ + .EnablePutBatching = TControlWrapper(false, false, true), + .EnableVPatch = TControlWrapper(false, false, true), + } } ); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_impl.h b/ydb/core/blobstorage/dsproxy/dsproxy_impl.h index d4afbd620774..e1b8e10f6229 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_impl.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy_impl.h @@ -106,9 +106,6 
@@ class TBlobStorageGroupProxy : public TActorBootstrapped TBatchedQueue BatchedGets[GetHandleClassCount]; TStackVec GetBatchedBucketQueue; - TMemorizableControlWrapper EnablePutBatching; - TMemorizableControlWrapper EnableVPatch; - TInstant EstablishingSessionStartTime; const TDuration MuteDuration = TDuration::Seconds(5); @@ -119,10 +116,7 @@ class TBlobStorageGroupProxy : public TActorBootstrapped bool HasInvalidGroupId() const { return GroupId.GetRawId() == Max(); } void ProcessInitQueue(); - // Acceleration parameters - TMemorizableControlWrapper SlowDiskThreshold; - TMemorizableControlWrapper PredictedDelayMultiplier; - TMemorizableControlWrapper MaxNumOfSlowDisks; + TBlobStorageProxyControlWrappers Controls; TAccelerationParams GetAccelerationParams(); diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp index b3691e0f8e7b..bf88a2399ae4 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp @@ -180,7 +180,10 @@ namespace NKikimr { Y_DEBUG_ABORT_UNLESS(MinREALHugeBlobInBytes); const ui32 partSize = Info->Type.PartSize(ev->Get()->Id); - if (EnablePutBatching && partSize < MinREALHugeBlobInBytes && partSize <= MaxBatchedPutSize) { + TInstant now = TActivationContext::Now(); + + if (Controls.EnablePutBatching.Update(now) && partSize < MinREALHugeBlobInBytes && + partSize <= MaxBatchedPutSize) { NKikimrBlobStorage::EPutHandleClass handleClass = ev->Get()->HandleClass; TEvBlobStorage::TEvPut::ETactic tactic = ev->Get()->Tactic; Y_ABORT_UNLESS((ui64)handleClass <= PutHandleClassCount); @@ -278,7 +281,7 @@ namespace NKikimr { .Event = ev->Get(), .ExecutionRelay = ev->Get()->ExecutionRelay }, - .UseVPatch = static_cast(EnableVPatch.Update(now)) + .UseVPatch = static_cast(Controls.EnableVPatch.Update(now)) }), ev->Get()->Deadline ); @@ -554,7 +557,7 @@ namespace NKikimr { ++*Mon->EventStopPutBatching; LWPROBE(DSProxyBatchedPutRequest, BatchedPutRequestCount, GroupId.GetRawId()); BatchedPutRequestCount = 0; - EnablePutBatching.Update(TActivationContext::Now()); + Controls.EnablePutBatching.Update(TActivationContext::Now()); } void TBlobStorageGroupProxy::Handle(TEvStopBatchingGetRequests::TPtr& ev) { diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp index d0676f7c226a..3cb1b3fd448b 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_state.cpp @@ -323,12 +323,28 @@ namespace NKikimr { Send(ev->Sender, new TEvProxySessionsState(Sessions ? Sessions->GroupQueues : nullptr)); } +#define SELECT_CONTROL_BY_DEVICE_TYPE(prefix, info) \ +([&](NPDisk::EDeviceType deviceType) -> i64 { \ + TInstant now = TActivationContext::Now(); \ + switch (deviceType) { \ + case NPDisk::DEVICE_TYPE_ROT: \ + return Controls.prefix##HDD.Update(now); \ + case NPDisk::DEVICE_TYPE_SSD: \ + case NPDisk::DEVICE_TYPE_NVME: \ + return Controls.prefix##SSD.Update(now); \ + default: \ + return Controls.prefix.Update(now); \ + } \ +})(info ? 
info->GetDeviceType() : NPDisk::DEVICE_TYPE_UNKNOWN) + TAccelerationParams TBlobStorageGroupProxy::GetAccelerationParams() { return TAccelerationParams{ - .SlowDiskThreshold = .001f * SlowDiskThreshold.Update(TActivationContext::Now()), - .PredictedDelayMultiplier = .001f * PredictedDelayMultiplier.Update(TActivationContext::Now()), - .MaxNumOfSlowDisks = (ui32)MaxNumOfSlowDisks.Update(TActivationContext::Now()), + .SlowDiskThreshold = .001f * SELECT_CONTROL_BY_DEVICE_TYPE(SlowDiskThreshold, Info), + .PredictedDelayMultiplier = .001f * SELECT_CONTROL_BY_DEVICE_TYPE(PredictedDelayMultiplier, Info), + .MaxNumOfSlowDisks = static_cast(SELECT_CONTROL_BY_DEVICE_TYPE(MaxNumOfSlowDisks, Info)), }; } +#undef SELECT_CONTROL_BY_DEVICE_TYPE + } // NKikimr diff --git a/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h b/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h index 39a22ead6a94..51e3b7b2dad8 100644 --- a/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h +++ b/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h @@ -81,14 +81,12 @@ struct TDSProxyEnv { TIntrusivePtr storagePoolCounters = perPoolCounters.GetPoolCounters("pool_name"); TControlWrapper enablePutBatching(DefaultEnablePutBatching, false, true); TControlWrapper enableVPatch(DefaultEnableVPatch, false, true); - TControlWrapper slowDiskThreshold(DefaultSlowDiskThreshold * 1000, 1, 1000000); - TControlWrapper predictedDelayMultiplier(DefaultPredictedDelayMultiplier * 1000, 1, 1000000); IActor *dsproxy = CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(Info), true, nodeMon, std::move(storagePoolCounters), TBlobStorageProxyParameters{ - .EnablePutBatching = enablePutBatching, - .EnableVPatch = enableVPatch, - .SlowDiskThreshold = slowDiskThreshold, - .PredictedDelayMultiplier = predictedDelayMultiplier, + .Controls = TBlobStorageProxyControlWrappers{ + .EnablePutBatching = enablePutBatching, + .EnableVPatch = enableVPatch, + } } ); TActorId actorId = runtime.Register(dsproxy, nodeIndex); diff --git a/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp b/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp index e66e40b95687..9b4730164dc5 100644 --- a/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp +++ b/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp @@ -4206,15 +4206,13 @@ class TBlobStorageProxyTest: public TTestBase { TIntrusivePtr storagePoolCounters = perPoolCounters.GetPoolCounters("pool_name"); TControlWrapper enablePutBatching(args.EnablePutBatching, false, true); TControlWrapper enableVPatch(DefaultEnableVPatch, false, true); - TControlWrapper slowDiskThreshold(DefaultSlowDiskThreshold * 1000, 1, 1000000); - TControlWrapper predictedDelayMultiplier(DefaultPredictedDelayMultiplier * 1000, 1, 1000000); std::unique_ptr proxyActor{CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(bsInfo), false, dsProxyNodeMon, TIntrusivePtr(storagePoolCounters), TBlobStorageProxyParameters{ - .EnablePutBatching = enablePutBatching, - .EnableVPatch = enableVPatch, - .SlowDiskThreshold = slowDiskThreshold, - .PredictedDelayMultiplier = predictedDelayMultiplier, + .Controls = TBlobStorageProxyControlWrappers{ + .EnablePutBatching = enablePutBatching, + .EnableVPatch = enableVPatch, + } } ) }; diff --git a/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut_runtime.h b/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut_runtime.h index 1f5bb2805a00..b724bd073f3f 100644 --- a/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut_runtime.h +++ 
b/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut_runtime.h @@ -87,14 +87,12 @@ class TFaultToleranceTestRuntime { TIntrusivePtr storagePoolCounters = perPoolCounters.GetPoolCounters("pool_name"); TControlWrapper enablePutBatching(DefaultEnablePutBatching, false, true); TControlWrapper enableVPatch(DefaultEnableVPatch, false, true); - TControlWrapper slowDiskThreshold(DefaultSlowDiskThreshold * 1000, 1, 1000000); - TControlWrapper predictedDelayMultiplier(DefaultPredictedDelayMultiplier * 1000, 1, 1000000); IActor *dsproxy = CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(GroupInfo), false, nodeMon, std::move(storagePoolCounters), TBlobStorageProxyParameters{ - .EnablePutBatching = enablePutBatching, - .EnableVPatch = enableVPatch, - .SlowDiskThreshold = slowDiskThreshold, - .PredictedDelayMultiplier = predictedDelayMultiplier, + .Controls = TBlobStorageProxyControlWrappers{ + .EnablePutBatching = enablePutBatching, + .EnableVPatch = enableVPatch, + } } ); setup->LocalServices.emplace_back(MakeBlobStorageProxyID(GroupInfo->GroupID), diff --git a/ydb/core/blobstorage/dsproxy/ya.make b/ydb/core/blobstorage/dsproxy/ya.make index c4096cbdd681..0dd9503a727b 100644 --- a/ydb/core/blobstorage/dsproxy/ya.make +++ b/ydb/core/blobstorage/dsproxy/ya.make @@ -62,6 +62,7 @@ PEERDIR( ydb/core/base ydb/core/blobstorage/backpressure ydb/core/blobstorage/base + ydb/core/blobstorage/common ydb/core/blobstorage/groupinfo ydb/core/blobstorage/storagepoolmon ydb/core/blobstorage/vdisk/ingress diff --git a/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp b/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp index 904f2a2fa100..2d619ce1272e 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp +++ b/ydb/core/blobstorage/nodewarden/node_warden_impl.cpp @@ -1,5 +1,6 @@ #include "node_warden_impl.h" +#include #include #include #include @@ -195,8 +196,16 @@ void TNodeWarden::Bootstrap() { "VDiskControls.DiskTimeAvailableScaleNVME"); icb->RegisterSharedControl(SlowDiskThreshold, "DSProxyControls.SlowDiskThreshold"); + icb->RegisterSharedControl(SlowDiskThresholdHDD, "DSProxyControls.SlowDiskThresholdHDD"); + icb->RegisterSharedControl(SlowDiskThresholdSSD, "DSProxyControls.SlowDiskThresholdSSD"); + icb->RegisterSharedControl(PredictedDelayMultiplier, "DSProxyControls.PredictedDelayMultiplier"); + icb->RegisterSharedControl(PredictedDelayMultiplierHDD, "DSProxyControls.PredictedDelayMultiplierHDD"); + icb->RegisterSharedControl(PredictedDelayMultiplierSSD, "DSProxyControls.PredictedDelayMultiplierSSD"); + icb->RegisterSharedControl(MaxNumOfSlowDisks, "DSProxyControls.MaxNumOfSlowDisks"); + icb->RegisterSharedControl(MaxNumOfSlowDisksHDD, "DSProxyControls.MaxNumOfSlowDisksHDD"); + icb->RegisterSharedControl(MaxNumOfSlowDisksSSD, "DSProxyControls.MaxNumOfSlowDisksSSD"); } // start replication broker diff --git a/ydb/core/blobstorage/nodewarden/node_warden_impl.h b/ydb/core/blobstorage/nodewarden/node_warden_impl.h index a1816435b01e..0388879d87f2 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_impl.h +++ b/ydb/core/blobstorage/nodewarden/node_warden_impl.h @@ -147,8 +147,16 @@ namespace NKikimr::NStorage { TCostMetricsParametersByMedia CostMetricsParametersByMedia; TControlWrapper SlowDiskThreshold; + TControlWrapper SlowDiskThresholdHDD; + TControlWrapper SlowDiskThresholdSSD; + TControlWrapper PredictedDelayMultiplier; + TControlWrapper PredictedDelayMultiplierHDD; + TControlWrapper PredictedDelayMultiplierSSD; + TControlWrapper MaxNumOfSlowDisks; + TControlWrapper 
MaxNumOfSlowDisksHDD; + TControlWrapper MaxNumOfSlowDisksSSD; public: struct TGroupRecord; @@ -173,9 +181,15 @@ namespace NKikimr::NStorage { TCostMetricsParameters{50}, TCostMetricsParameters{32}, }) - , SlowDiskThreshold(2'000, 1, 1'000'000) - , PredictedDelayMultiplier(1'000, 1, 1000) - , MaxNumOfSlowDisks(2, 1, 2) + , SlowDiskThreshold(std::round(DefaultSlowDiskThreshold * 1000), 1, 1'000'000) + , SlowDiskThresholdHDD(std::round(DefaultSlowDiskThreshold * 1000), 1, 1'000'000) + , SlowDiskThresholdSSD(std::round(DefaultSlowDiskThreshold * 1000), 1, 1'000'000) + , PredictedDelayMultiplier(std::round(DefaultPredictedDelayMultiplier * 1000), 0, 1'000'000) + , PredictedDelayMultiplierHDD(std::round(DefaultPredictedDelayMultiplier * 1000), 0, 1'000'000) + , PredictedDelayMultiplierSSD(std::round(DefaultPredictedDelayMultiplier * 1000), 0, 1'000'000) + , MaxNumOfSlowDisks(DefaultMaxNumOfSlowDisks, 1, 2) + , MaxNumOfSlowDisksHDD(DefaultMaxNumOfSlowDisks, 1, 2) + , MaxNumOfSlowDisksSSD(DefaultMaxNumOfSlowDisks, 1, 2) { Y_ABORT_UNLESS(Cfg->BlobStorageConfig.GetServiceSet().AvailabilityDomainsSize() <= 1); AvailDomainId = 1; diff --git a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp index 953821ea942d..6ef49e31ea83 100644 --- a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp +++ b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp @@ -8,6 +8,11 @@ TActorId TNodeWarden::StartEjectedProxy(ui32 groupId) { return Register(CreateBlobStorageGroupEjectedProxy(groupId, DsProxyNodeMon), TMailboxType::ReadAsFilled, AppData()->SystemPoolId); } +#define ADD_CONTROLS_FOR_DEVICE_TYPES(prefix) \ + .prefix = prefix, \ + .prefix##HDD = prefix##HDD, \ + .prefix##SSD = prefix##SSD + void TNodeWarden::StartLocalProxy(ui32 groupId) { STLOG(PRI_DEBUG, BS_NODE, NW12, "StartLocalProxy", (GroupId, groupId)); @@ -38,11 +43,13 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) { TIntrusivePtr(info), false, DsProxyNodeMon, getCounters(info), TBlobStorageProxyParameters{ .UseActorSystemTimeInBSQueue = Cfg->UseActorSystemTimeInBSQueue, - .EnablePutBatching = EnablePutBatching, - .EnableVPatch = EnableVPatch, - .SlowDiskThreshold = SlowDiskThreshold, - .PredictedDelayMultiplier = PredictedDelayMultiplier, - .MaxNumOfSlowDisks = MaxNumOfSlowDisks, + .Controls = TBlobStorageProxyControlWrappers{ + .EnablePutBatching = EnablePutBatching, + .EnableVPatch = EnableVPatch, + ADD_CONTROLS_FOR_DEVICE_TYPES(SlowDiskThreshold), + ADD_CONTROLS_FOR_DEVICE_TYPES(PredictedDelayMultiplier), + ADD_CONTROLS_FOR_DEVICE_TYPES(MaxNumOfSlowDisks), + } }), TMailboxType::ReadAsFilled, AppData()->SystemPoolId); [[fallthrough]]; case NKikimrBlobStorage::TGroupDecommitStatus::DONE: @@ -59,11 +66,13 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) { proxy.reset(CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(info), false, DsProxyNodeMon, getCounters(info), TBlobStorageProxyParameters{ .UseActorSystemTimeInBSQueue = Cfg->UseActorSystemTimeInBSQueue, - .EnablePutBatching = EnablePutBatching, - .EnableVPatch = EnableVPatch, - .SlowDiskThreshold = SlowDiskThreshold, - .PredictedDelayMultiplier = PredictedDelayMultiplier, - .MaxNumOfSlowDisks = MaxNumOfSlowDisks, + .Controls = TBlobStorageProxyControlWrappers{ + .EnablePutBatching = EnablePutBatching, + .EnableVPatch = EnableVPatch, + ADD_CONTROLS_FOR_DEVICE_TYPES(SlowDiskThreshold), + ADD_CONTROLS_FOR_DEVICE_TYPES(PredictedDelayMultiplier), + ADD_CONTROLS_FOR_DEVICE_TYPES(MaxNumOfSlowDisks), + } } ) ); @@ -72,11 +81,13 @@ void 
TNodeWarden::StartLocalProxy(ui32 groupId) { // create proxy without configuration proxy.reset(CreateBlobStorageGroupProxyUnconfigured(groupId, DsProxyNodeMon, TBlobStorageProxyParameters{ .UseActorSystemTimeInBSQueue = Cfg->UseActorSystemTimeInBSQueue, - .EnablePutBatching = EnablePutBatching, - .EnableVPatch = EnableVPatch, - .SlowDiskThreshold = SlowDiskThreshold, - .PredictedDelayMultiplier = PredictedDelayMultiplier, - .MaxNumOfSlowDisks = MaxNumOfSlowDisks, + .Controls = TBlobStorageProxyControlWrappers{ + .EnablePutBatching = EnablePutBatching, + .EnableVPatch = EnableVPatch, + ADD_CONTROLS_FOR_DEVICE_TYPES(SlowDiskThreshold), + ADD_CONTROLS_FOR_DEVICE_TYPES(PredictedDelayMultiplier), + ADD_CONTROLS_FOR_DEVICE_TYPES(MaxNumOfSlowDisks), + } })); } @@ -180,3 +191,5 @@ void TNodeWarden::Handle(NNodeWhiteboard::TEvWhiteboard::TEvBSGroupStateUpdate:: TActivationContext::Send(ev->Forward(WhiteboardId)); } } + +#undef ADD_CONTROLS_FOR_DEVICE_TYPES diff --git a/ydb/core/blobstorage/nodewarden/ya.make b/ydb/core/blobstorage/nodewarden/ya.make index 0f0790d4bca2..bfb4e2cd5227 100644 --- a/ydb/core/blobstorage/nodewarden/ya.make +++ b/ydb/core/blobstorage/nodewarden/ya.make @@ -37,6 +37,7 @@ PEERDIR( library/cpp/openssl/crypto ydb/core/base ydb/core/blob_depot/agent + ydb/core/blobstorage/common ydb/core/blobstorage/crypto ydb/core/blobstorage/groupinfo ydb/core/blobstorage/pdisk diff --git a/ydb/core/blobstorage/ut_blobstorage/lib/env.h b/ydb/core/blobstorage/ut_blobstorage/lib/env.h index 9f49cfcd0791..942a64e8b1e8 100644 --- a/ydb/core/blobstorage/ut_blobstorage/lib/env.h +++ b/ydb/core/blobstorage/ut_blobstorage/lib/env.h @@ -402,8 +402,14 @@ struct TEnvironmentSetup { ADD_ICB_CONTROL("VDiskControls.DiskTimeAvailableScaleNVME", 1'000, 1, 1'000'000, std::round(Settings.DiskTimeAvailableScale * 1'000)); ADD_ICB_CONTROL("DSProxyControls.SlowDiskThreshold", 2'000, 1, 1'000'000, std::round(Settings.SlowDiskThreshold * 1'000)); + ADD_ICB_CONTROL("DSProxyControls.SlowDiskThresholdHDD", 2'000, 1, 1'000'000, std::round(Settings.SlowDiskThreshold * 1'000)); + ADD_ICB_CONTROL("DSProxyControls.SlowDiskThresholdSSD", 2'000, 1, 1'000'000, std::round(Settings.SlowDiskThreshold * 1'000)); ADD_ICB_CONTROL("DSProxyControls.PredictedDelayMultiplier", 1'000, 1, 1'000'000, std::round(Settings.VDiskPredictedDelayMultiplier * 1'000)); + ADD_ICB_CONTROL("DSProxyControls.PredictedDelayMultiplierHDD", 1'000, 1, 1'000'000, std::round(Settings.VDiskPredictedDelayMultiplier * 1'000)); + ADD_ICB_CONTROL("DSProxyControls.PredictedDelayMultiplierSSD", 1'000, 1, 1'000'000, std::round(Settings.VDiskPredictedDelayMultiplier * 1'000)); ADD_ICB_CONTROL("DSProxyControls.MaxNumOfSlowDisks", 2, 1, 2, Settings.MaxNumOfSlowDisks); + ADD_ICB_CONTROL("DSProxyControls.MaxNumOfSlowDisksHDD", 2, 1, 2, Settings.MaxNumOfSlowDisks); + ADD_ICB_CONTROL("DSProxyControls.MaxNumOfSlowDisksSSD", 2, 1, 2, Settings.MaxNumOfSlowDisks); #undef ADD_ICB_CONTROL diff --git a/ydb/core/blobstorage/ut_group/main.cpp b/ydb/core/blobstorage/ut_group/main.cpp index 2dfada62548c..0f8c6a5a5db3 100644 --- a/ydb/core/blobstorage/ut_group/main.cpp +++ b/ydb/core/blobstorage/ut_group/main.cpp @@ -409,14 +409,12 @@ class TTestEnv { StoragePoolCounters = MakeIntrusive(proxy, TString(), NPDisk::DEVICE_TYPE_SSD); TControlWrapper enablePutBatching(DefaultEnablePutBatching, false, true); TControlWrapper enableVPatch(DefaultEnableVPatch, false, true); - TControlWrapper slowDiskThreshold(DefaultSlowDiskThreshold * 1000, 1, 1000000); - TControlWrapper 
predictedDelayMultiplier(DefaultPredictedDelayMultiplier * 1000, 1, 1000000); std::unique_ptr proxyActor{CreateBlobStorageGroupProxyConfigured(TIntrusivePtr(Info), false, mon, TIntrusivePtr(StoragePoolCounters), TBlobStorageProxyParameters{ - .EnablePutBatching = enablePutBatching, - .EnableVPatch = enableVPatch, - .SlowDiskThreshold = slowDiskThreshold, - .PredictedDelayMultiplier = predictedDelayMultiplier, + .Controls = TBlobStorageProxyControlWrappers{ + .EnablePutBatching = enablePutBatching, + .EnableVPatch = enableVPatch, + } })}; const TActorId& actorId = runtime.Register(proxyActor.release(), TActorId(), 0, std::nullopt, 1); runtime.RegisterService(MakeBlobStorageProxyID(GroupId), actorId); diff --git a/ydb/core/protos/config.proto b/ydb/core/protos/config.proto index e6dd9a65fd6a..b9924115229e 100644 --- a/ydb/core/protos/config.proto +++ b/ydb/core/protos/config.proto @@ -1326,6 +1326,38 @@ message TImmediateControlsConfig { MinValue: 1, MaxValue: 2, DefaultValue: 2 }]; + + optional uint64 SlowDiskThresholdHDD = 6 [(ControlOptions) = { + Description: "The minimum ratio of slowest and second slowest disks, required to accelerate, promille, option for HDD", + MinValue: 1, + MaxValue: 1000000, + DefaultValue: 2000 }]; + optional uint64 PredictedDelayMultiplierHDD = 7 [(ControlOptions) = { + Description: "Predicted time of VDisk's response multiplier, promille, option for HDD", + MinValue: 0, + MaxValue: 1000000, + DefaultValue: 1000 }]; + optional uint64 MaxNumOfSlowDisksHDD = 8 [(ControlOptions) = { + Description: "Maximum number of slow disks, which DSProxy can skip with Accelerations, option for HDD", + MinValue: 1, + MaxValue: 2, + DefaultValue: 2 }]; + + optional uint64 SlowDiskThresholdSSD = 9 [(ControlOptions) = { + Description: "The minimum ratio of slowest and second slowest disks, required to accelerate, promille, option for SSD", + MinValue: 1, + MaxValue: 1000000, + DefaultValue: 2000 }]; + optional uint64 PredictedDelayMultiplierSSD = 10 [(ControlOptions) = { + Description: "Predicted time of VDisk's response multiplier, promille, option for SSD", + MinValue: 0, + MaxValue: 1000000, + DefaultValue: 1000 }]; + optional uint64 MaxNumOfSlowDisksSSD = 11 [(ControlOptions) = { + Description: "Maximum number of slow disks, which DSProxy can skip with Accelerations, option for SSD", + MinValue: 1, + MaxValue: 2, + DefaultValue: 2 }]; } message TPDiskControls { From c50eac9cc6d421dd4dfc75fbf20b617aa92cd963 Mon Sep 17 00:00:00 2001 From: Sergey Belyakov Date: Tue, 19 Nov 2024 12:50:44 +0000 Subject: [PATCH 10/13] Revert "Create separate trace id for BlobStorage requests and link them to original traces (#6444)" This reverts commit c2e94d8a63fd1c4d290011b3c44c672c57b539f2. 
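The hunks below restore the pre-#6444 span handling: the CreateBlobStorageGroup*Request factories again accept an
NWilson::TTraceId and open the "DSProxy.*" span directly in the caller's trace (storing it in params.Common.Span),
and the request actor base keeps a single Span member instead of a ParentSpan plus a separately created, linked
trace. The sketch below is a stand-alone illustration of that ownership flow, not YDB code; every type in it is a
simplified placeholder and the attribute values are invented.

    // Stand-in model of the restored pattern: the factory gets the caller's trace id and opens the
    // request span in that same trace; the request actor then owns the span via params.Common.Span.
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <utility>

    struct TTraceId {                                  // placeholder for NWilson::TTraceId
        std::uint64_t Id = 0;
        explicit operator bool() const { return Id != 0; }
    };

    struct TSpan {                                     // placeholder for NWilson::TSpan
        TTraceId Trace;
        std::string Name;
        TSpan() = default;
        TSpan(TTraceId trace, std::string name) : Trace(trace), Name(std::move(name)) {}
        TSpan& Attribute(const std::string& key, std::uint64_t value) {
            if (Trace) {
                std::cout << Name << " " << key << "=" << value << "\n";
            }
            return *this;
        }
        void EndOk() {
            if (Trace) {
                std::cout << Name << " ok\n";
            }
        }
    };

    struct TCommonParameters { TSpan Span; };          // loosely mirrors TCommonParameters::Span
    struct TPutParameters { TCommonParameters Common; };

    // Loosely mirrors CreateBlobStorageGroupPutRequest(params, traceId) after this revert: the span is
    // created here, inside the caller's trace, and handed to the request actor through the parameters.
    void RunPutRequest(TPutParameters params, TTraceId traceId) {
        params.Common.Span = TSpan(traceId, "DSProxy.Put");
        TSpan span = std::move(params.Common.Span);    // the actor ctor takes ownership (Span(std::move(...)))
        span.Attribute("GroupId", 2181038080)          // invented sample attributes, mirroring the base ctor
            .Attribute("RestartCounter", 0);
        span.EndOk();                                  // a successful reply ends the one and only span
    }

    int main() {
        RunPutRequest(TPutParameters{}, TTraceId{42}); // 42 is an arbitrary example trace id
        return 0;
    }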
--- ydb/core/base/blobstorage.cpp | 111 ------------------ ydb/core/base/blobstorage.h | 24 ---- ydb/core/blobstorage/dsproxy/dsproxy.h | 50 ++++---- .../dsproxy/dsproxy_assimilate.cpp | 3 +- .../blobstorage/dsproxy/dsproxy_block.cpp | 7 +- .../blobstorage/dsproxy/dsproxy_collect.cpp | 3 +- .../blobstorage/dsproxy/dsproxy_discover.cpp | 3 +- .../dsproxy/dsproxy_discover_m3dc.cpp | 3 +- .../dsproxy/dsproxy_discover_m3of4.cpp | 3 +- ydb/core/blobstorage/dsproxy/dsproxy_get.cpp | 7 +- .../dsproxy/dsproxy_indexrestoreget.cpp | 7 +- .../dsproxy/dsproxy_multicollect.cpp | 3 +- .../blobstorage/dsproxy/dsproxy_multiget.cpp | 3 +- .../blobstorage/dsproxy/dsproxy_patch.cpp | 7 +- ydb/core/blobstorage/dsproxy/dsproxy_put.cpp | 7 +- .../blobstorage/dsproxy/dsproxy_range.cpp | 3 +- .../blobstorage/dsproxy/dsproxy_request.cpp | 32 ++--- .../blobstorage/dsproxy/dsproxy_status.cpp | 3 +- ydb/library/actors/wilson/wilson_span.h | 4 - ydb/library/actors/wilson/wilson_trace.h | 4 - 20 files changed, 79 insertions(+), 208 deletions(-) diff --git a/ydb/core/base/blobstorage.cpp b/ydb/core/base/blobstorage.cpp index b891d7777d1e..59bfb48fa560 100644 --- a/ydb/core/base/blobstorage.cpp +++ b/ydb/core/base/blobstorage.cpp @@ -1,6 +1,4 @@ #include "blobstorage.h" -#include -#include namespace NKikimr { @@ -46,12 +44,6 @@ bool operator<(const TPDiskCategory x, const TPDiskCategory y) { return std::make_tuple(x.Type(), x.Kind()) < std::make_tuple(y.Type(), y.Kind()); } -void TEvBlobStorage::TEvPut::ToSpan(NWilson::TSpan& span) const { - span - .Attribute("Id", Id.ToString()) - .Attribute("PutHandleClass", NKikimrBlobStorage::EPutHandleClass_Name(HandleClass)); -} - std::unique_ptr TEvBlobStorage::TEvPut::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId) { auto res = std::make_unique(status, Id, TStorageStatusFlags(), groupId, 0.0f); @@ -59,36 +51,6 @@ std::unique_ptr TEvBlobStorage::TEvPut::MakeErrorR return res; } -void TEvBlobStorage::TEvGet::ToSpan(NWilson::TSpan& span) const { - i64 totalSize = 0; - for (ui32 i = 0; i < QuerySize; ++i) { - const auto& q = Queries[i]; - if (q.Shift < q.Id.BlobSize()) { - totalSize += Min(q.Id.BlobSize() - q.Shift, q.Size ? 
q.Size : Max()); - } - } - - span - .Attribute("TotalSize", totalSize) - .Attribute("GetHandleClass", NKikimrBlobStorage::EGetHandleClass_Name(GetHandleClass)) - .Attribute("MustRestoreFirst", MustRestoreFirst) - .Attribute("IsIndexOnly", IsIndexOnly); - - if (span.GetTraceId().GetVerbosity() >= TWilson::DsProxyInternals) { - NWilson::TArrayValue queries; - queries.reserve(QuerySize); - for (ui32 i = 0; i < QuerySize; ++i) { - const auto& q = Queries[i]; - queries.emplace_back(NWilson::TKeyValueList{{ - {"Id", q.Id.ToString()}, - {"Shift", q.Shift}, - {"Size", q.Size}, - }}); - } - span.Attribute("Queries", std::move(queries)); - } -} - std::unique_ptr TEvBlobStorage::TEvGet::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId) { auto res = std::make_unique(status, QuerySize, groupId); @@ -105,12 +67,6 @@ std::unique_ptr TEvBlobStorage::TEvGet::MakeErrorR return res; } -void TEvBlobStorage::TEvBlock::ToSpan(NWilson::TSpan& span) const { - span - .Attribute("TabletId", ::ToString(TabletId)) - .Attribute("Generation", Generation); -} - std::unique_ptr TEvBlobStorage::TEvBlock::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId /*groupId*/) { auto res = std::make_unique(status); @@ -118,13 +74,6 @@ std::unique_ptr TEvBlobStorage::TEvBlock::MakeEr return res; } -void TEvBlobStorage::TEvPatch::ToSpan(NWilson::TSpan& span) const { - span - .Attribute("OriginalGroupId", OriginalGroupId) - .Attribute("OriginalId", OriginalId.ToString()) - .Attribute("PatchedId", PatchedId.ToString()); -} - std::unique_ptr TEvBlobStorage::TEvPatch::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId) { auto res = std::make_unique(status, PatchedId, TStorageStatusFlags(), groupId, 0.0f); @@ -132,9 +81,6 @@ std::unique_ptr TEvBlobStorage::TEvPatch::MakeEr return res; } -void TEvBlobStorage::TEvInplacePatch::ToSpan(NWilson::TSpan& /*span*/) const { -} - std::unique_ptr TEvBlobStorage::TEvInplacePatch::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason) { auto res = std::make_unique(status, PatchedId, TStorageStatusFlags(), 0.0f); @@ -142,12 +88,6 @@ std::unique_ptr TEvBlobStorage::TEvInplac return res; } -void TEvBlobStorage::TEvDiscover::ToSpan(NWilson::TSpan& span) const { - span - .Attribute("TabletId", ::ToString(TabletId)) - .Attribute("ReadBody", ReadBody); -} - std::unique_ptr TEvBlobStorage::TEvDiscover::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId/*groupId*/) { auto res = std::make_unique(status, MinGeneration, 0); @@ -155,15 +95,6 @@ std::unique_ptr TEvBlobStorage::TEvDiscover:: return res; } -void TEvBlobStorage::TEvRange::ToSpan(NWilson::TSpan& span) const { - span - .Attribute("TabletId", ::ToString(TabletId)) - .Attribute("From", From.ToString()) - .Attribute("To", To.ToString()) - .Attribute("MustRestoreFirst", MustRestoreFirst) - .Attribute("IsIndexOnly", IsIndexOnly); -} - std::unique_ptr TEvBlobStorage::TEvRange::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId) { auto res = std::make_unique(status, From, To, groupId); @@ -171,42 +102,6 @@ std::unique_ptr TEvBlobStorage::TEvRange::MakeEr return res; } -void TEvBlobStorage::TEvCollectGarbage::ToSpan(NWilson::TSpan& span) const { - span - .Attribute("TabletId", ::ToString(TabletId)) - .Attribute("RecordGeneration", RecordGeneration) - .Attribute("PerGenerationCounter", PerGenerationCounter) - 
.Attribute("Channel", Channel); - - if (Collect) { - span - .Attribute("CollectGeneration", CollectGeneration) - .Attribute("CollectStep", CollectStep); - } - - if (span.GetTraceId().GetVerbosity() >= TWilson::DsProxyInternals) { - auto vector = [&](const auto& name, const auto& v) { - if (v) { - NWilson::TArrayValue items; - items.reserve(v->size()); - for (const TLogoBlobID& id : *v) { - items.emplace_back(id.ToString()); - } - span.Attribute(name, std::move(items)); - } - }; - vector("Keep", Keep); - vector("DoNotKeep", DoNotKeep); - } else { - if (Keep) { - span.Attribute("NumKeep", static_cast(Keep->size())); - } - if (DoNotKeep) { - span.Attribute("NumDoNotKeep", static_cast(DoNotKeep->size())); - } - } -} - std::unique_ptr TEvBlobStorage::TEvCollectGarbage::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId /*groupId*/) { auto res = std::make_unique(status, TabletId, RecordGeneration, PerGenerationCounter, Channel); @@ -214,9 +109,6 @@ std::unique_ptr TEvBlobStorage::TEvColl return res; } -void TEvBlobStorage::TEvStatus::ToSpan(NWilson::TSpan& /*span*/) const -{} - std::unique_ptr TEvBlobStorage::TEvStatus::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId /*groupId*/) { auto res = std::make_unique(status, TStorageStatusFlags()); @@ -224,9 +116,6 @@ std::unique_ptr TEvBlobStorage::TEvStatus::Make return res; } -void TEvBlobStorage::TEvAssimilate::ToSpan(NWilson::TSpan& /*span*/) const -{} - std::unique_ptr TEvBlobStorage::TEvAssimilate::MakeErrorResponse( NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId/*groupId*/) { return std::make_unique(status, errorReason); diff --git a/ydb/core/base/blobstorage.h b/ydb/core/base/blobstorage.h index 68e1b91cc273..7d018a9e1e66 100644 --- a/ydb/core/base/blobstorage.h +++ b/ydb/core/base/blobstorage.h @@ -25,10 +25,6 @@ #include -namespace NWilson { - class TSpan; -} // NWilson - namespace NKikimr { static constexpr ui32 MaxProtobufSize = 67108000; @@ -1002,8 +998,6 @@ struct TEvBlobStorage { return sizeof(*this) + Buffer.GetSize(); } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -1207,8 +1201,6 @@ struct TEvBlobStorage { return sizeof(*this) + QuerySize * sizeof(TQuery); } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); @@ -1362,8 +1354,6 @@ struct TEvBlobStorage { return sizeof(*this); } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -1561,8 +1551,6 @@ struct TEvBlobStorage { return sizeof(*this) + sizeof(TDiff) * DiffCount; } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -1653,8 +1641,6 @@ struct TEvBlobStorage { return sizeof(*this) + sizeof(TDiff) * DiffCount; } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason); }; @@ -1742,8 +1728,6 @@ struct TEvBlobStorage { return sizeof(*this); } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -1849,8 +1833,6 @@ struct 
TEvBlobStorage { return sizeof(*this); } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -2046,8 +2028,6 @@ struct TEvBlobStorage { return sizeof(*this) + ((Keep ? Keep->size() : 0) + (DoNotKeep ? DoNotKeep->size() : 0)) * sizeof(TLogoBlobID); } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -2116,8 +2096,6 @@ struct TEvBlobStorage { return sizeof(*this); } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; @@ -2193,8 +2171,6 @@ struct TEvBlobStorage { return sizeof(*this); } - void ToSpan(NWilson::TSpan& span) const; - std::unique_ptr MakeErrorResponse(NKikimrProto::EReplyStatus status, const TString& errorReason, TGroupId groupId); }; diff --git a/ydb/core/blobstorage/dsproxy/dsproxy.h b/ydb/core/blobstorage/dsproxy/dsproxy.h index da1050426b8a..8169c564283b 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy.h +++ b/ydb/core/blobstorage/dsproxy/dsproxy.h @@ -188,7 +188,7 @@ class TBlobStorageGroupRequestActor : public TActor { TInstant Now; TIntrusivePtr& StoragePoolCounters; ui32 RestartCounter; - NWilson::TTraceId TraceId = {}; + NWilson::TSpan Span; TDerived* Event = nullptr; std::shared_ptr ExecutionRelay = nullptr; @@ -211,7 +211,7 @@ class TBlobStorageGroupRequestActor : public TActor { , Mon(std::move(params.Common.Mon)) , PoolCounters(params.Common.StoragePoolCounters) , LogCtx(params.TypeSpecific.LogComponent, params.Common.LogAccEnabled) - , ParentSpan(TWilson::BlobStorage, std::move(params.Common.TraceId), params.TypeSpecific.Name) + , Span(std::move(params.Common.Span)) , RestartCounter(params.Common.RestartCounter) , CostModel(GroupQueues->CostModel) , Source(params.Common.Source) @@ -222,16 +222,9 @@ class TBlobStorageGroupRequestActor : public TActor { , ExecutionRelay(std::move(params.Common.ExecutionRelay)) { TDerived::ActiveCounter(Mon)->Inc(); - - if (ParentSpan) { - const NWilson::TTraceId& parentTraceId = ParentSpan.GetTraceId(); - Span = NWilson::TSpan(TWilson::BlobStorage, NWilson::TTraceId::NewTraceId(parentTraceId.GetVerbosity(), - parentTraceId.GetTimeToLive()), ParentSpan.GetName()); - ParentSpan.Link(Span.GetTraceId()); - Span.Attribute("GroupId", Info->GroupID.GetRawId()); - Span.Attribute("RestartCounter", RestartCounter); - params.Common.Event->ToSpan(Span); - } + Span + .Attribute("GroupId", Info->GroupID.GetRawId()) + .Attribute("RestartCounter", RestartCounter); Y_ABORT_UNLESS(CostModel); } @@ -593,10 +586,8 @@ class TBlobStorageGroupRequestActor : public TActor { if (term) { if (status == NKikimrProto::OK) { - ParentSpan.EndOk(); Span.EndOk(); } else { - ParentSpan.EndError(errorReason); Span.EndError(std::move(errorReason)); } } @@ -642,7 +633,6 @@ class TBlobStorageGroupRequestActor : public TActor { TIntrusivePtr Mon; TIntrusivePtr PoolCounters; TLogContext LogCtx; - NWilson::TSpan ParentSpan; NWilson::TSpan Span; TStackVec, 16> Responsiveness; TString ErrorReason; @@ -688,7 +678,7 @@ struct TBlobStorageGroupRangeParameters { , }; }; -IActor* CreateBlobStorageGroupRangeRequest(TBlobStorageGroupRangeParameters params); +IActor* CreateBlobStorageGroupRangeRequest(TBlobStorageGroupRangeParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupPutParameters { 
TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -702,7 +692,7 @@ struct TBlobStorageGroupPutParameters { bool EnableRequestMod3x3ForMinLatency; TAccelerationParams AccelerationParams; }; -IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupPutParameters params); +IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupPutParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupMultiPutParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -728,7 +718,7 @@ struct TBlobStorageGroupMultiPutParameters { return maxRestarts; } }; -IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupMultiPutParameters params); +IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupMultiPutParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupGetParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -740,7 +730,7 @@ struct TBlobStorageGroupGetParameters { TNodeLayoutInfoPtr NodeLayout; TAccelerationParams AccelerationParams; }; -IActor* CreateBlobStorageGroupGetRequest(TBlobStorageGroupGetParameters params); +IActor* CreateBlobStorageGroupGetRequest(TBlobStorageGroupGetParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupPatchParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -752,7 +742,7 @@ struct TBlobStorageGroupPatchParameters { bool UseVPatch = false; }; -IActor* CreateBlobStorageGroupPatchRequest(TBlobStorageGroupPatchParameters params); +IActor* CreateBlobStorageGroupPatchRequest(TBlobStorageGroupPatchParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupMultiGetParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -763,7 +753,7 @@ struct TBlobStorageGroupMultiGetParameters { }; bool UseVPatch = false; }; -IActor* CreateBlobStorageGroupMultiGetRequest(TBlobStorageGroupMultiGetParameters params); +IActor* CreateBlobStorageGroupMultiGetRequest(TBlobStorageGroupMultiGetParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupRestoreGetParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -773,7 +763,7 @@ struct TBlobStorageGroupRestoreGetParameters { .Activity = NKikimrServices::TActivity::BS_PROXY_INDEXRESTOREGET_ACTOR, }; }; -IActor* CreateBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters params); +IActor* CreateBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupDiscoverParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -783,9 +773,9 @@ struct TBlobStorageGroupDiscoverParameters { .Activity = NKikimrServices::TActivity::BS_GROUP_DISCOVER, }; }; -IActor* CreateBlobStorageGroupDiscoverRequest(TBlobStorageGroupDiscoverParameters params); -IActor* CreateBlobStorageGroupMirror3dcDiscoverRequest(TBlobStorageGroupDiscoverParameters params); -IActor* CreateBlobStorageGroupMirror3of4DiscoverRequest(TBlobStorageGroupDiscoverParameters params); +IActor* CreateBlobStorageGroupDiscoverRequest(TBlobStorageGroupDiscoverParameters params, NWilson::TTraceId traceId); +IActor* CreateBlobStorageGroupMirror3dcDiscoverRequest(TBlobStorageGroupDiscoverParameters params, NWilson::TTraceId traceId); +IActor* CreateBlobStorageGroupMirror3of4DiscoverRequest(TBlobStorageGroupDiscoverParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupCollectGarbageParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -795,7 +785,7 @@ struct 
TBlobStorageGroupCollectGarbageParameters { .Activity = NKikimrServices::TActivity::BS_GROUP_COLLECT_GARBAGE, }; }; -IActor* CreateBlobStorageGroupCollectGarbageRequest(TBlobStorageGroupCollectGarbageParameters params); +IActor* CreateBlobStorageGroupCollectGarbageRequest(TBlobStorageGroupCollectGarbageParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupMultiCollectParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -805,7 +795,7 @@ struct TBlobStorageGroupMultiCollectParameters { .Activity = NKikimrServices::TActivity::BS_PROXY_MULTICOLLECT_ACTOR, }; }; -IActor* CreateBlobStorageGroupMultiCollectRequest(TBlobStorageGroupMultiCollectParameters params); +IActor* CreateBlobStorageGroupMultiCollectRequest(TBlobStorageGroupMultiCollectParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupBlockParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -815,7 +805,7 @@ struct TBlobStorageGroupBlockParameters { .Activity = NKikimrServices::TActivity::BS_GROUP_BLOCK, }; }; -IActor* CreateBlobStorageGroupBlockRequest(TBlobStorageGroupBlockParameters params); +IActor* CreateBlobStorageGroupBlockRequest(TBlobStorageGroupBlockParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupStatusParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -825,7 +815,7 @@ struct TBlobStorageGroupStatusParameters { .Activity = NKikimrServices::TActivity::BS_PROXY_STATUS_ACTOR, }; }; -IActor* CreateBlobStorageGroupStatusRequest(TBlobStorageGroupStatusParameters params); +IActor* CreateBlobStorageGroupStatusRequest(TBlobStorageGroupStatusParameters params, NWilson::TTraceId traceId); struct TBlobStorageGroupAssimilateParameters { TBlobStorageGroupRequestActor::TCommonParameters Common; @@ -836,7 +826,7 @@ struct TBlobStorageGroupAssimilateParameters { .Activity = NKikimrServices::TActivity::BS_GROUP_ASSIMILATE, }; }; -IActor* CreateBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParameters params); +IActor* CreateBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParameters params, NWilson::TTraceId traceId); IActor* CreateBlobStorageGroupEjectedProxy(ui32 groupId, TIntrusivePtr &nodeMon); diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp index 863ee6c01c34..07d6fa488b73 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_assimilate.cpp @@ -459,7 +459,8 @@ class TBlobStorageGroupAssimilateRequest : public TBlobStorageGroupRequestActor< } }; -IActor* CreateBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParameters params) { +IActor* CreateBlobStorageGroupAssimilateRequest(TBlobStorageGroupAssimilateParameters params, NWilson::TTraceId traceId) { + params.Common.Span = NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.Assimilate"); return new TBlobStorageGroupAssimilateRequest(params); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp index 5f93958da094..9edbc3e81c3d 100644 --- a/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp +++ b/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp @@ -169,7 +169,12 @@ class TBlobStorageGroupBlockRequest : public TBlobStorageGroupRequestActorToString()); + } + params.Common.Span = std::move(span); return new TBlobStorageGroupBlockRequest(params); } diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp 
index b829103e2345..ee18d2fc3e34 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_collect.cpp
@@ -200,7 +200,8 @@ class TBlobStorageGroupCollectGarbageRequest : public TBlobStorageGroupRequestAc
     }
 };
 
-IActor* CreateBlobStorageGroupCollectGarbageRequest(TBlobStorageGroupCollectGarbageParameters params) {
+IActor* CreateBlobStorageGroupCollectGarbageRequest(TBlobStorageGroupCollectGarbageParameters params, NWilson::TTraceId traceId) {
+    params.Common.Span = NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.CollectGarbage");
     return new TBlobStorageGroupCollectGarbageRequest(params);
 }
 
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp
index deeaf7a5b65a..000a70105a2a 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_discover.cpp
@@ -967,7 +967,8 @@ class TBlobStorageGroupDiscoverRequest : public TBlobStorageGroupRequestActor
+        span.Attribute("event", ev->ToString());
+    }
+    params.Common.Span = std::move(span);
     return new TBlobStorageGroupGetRequest(params);
 }
 
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp
index fb858dfee97c..6e3c2e729a49 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp
@@ -388,7 +388,12 @@ class TBlobStorageGroupIndexRestoreGetRequest
     }
 };
 
-IActor* CreateBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters params) {
+IActor* CreateBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters params, NWilson::TTraceId traceId) {
+    NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.IndexRestoreGet");
+    if (span) {
+        span.Attribute("event", ev->ToString());
+    }
+    params.Common.Span = std::move(span);
     return new TBlobStorageGroupIndexRestoreGetRequest(params);
 }
 
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp
index efa0e98077bb..dc10e454030e 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_multicollect.cpp
@@ -209,7 +209,8 @@ class TBlobStorageGroupMultiCollectRequest
     }
 };
 
-IActor* CreateBlobStorageGroupMultiCollectRequest(TBlobStorageGroupMultiCollectParameters params) {
+IActor* CreateBlobStorageGroupMultiCollectRequest(TBlobStorageGroupMultiCollectParameters params, NWilson::TTraceId traceId) {
+    params.Common.Span = NWilson::TSpan(TWilson::BlobStorage, std::move(traceId), "DSProxy.MultiCollect");
     return new TBlobStorageGroupMultiCollectRequest(params);
 }
 
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp
index 31f2deea55e8..cc882b41d36c 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_multiget.cpp
@@ -202,7 +202,8 @@ class TBlobStorageGroupMultiGetRequest : public TBlobStorageGroupRequestActor
+        span.Attribute("event", ev->ToString());
+    }
+    params.Common.Span = std::move(span);
     return new TBlobStorageGroupPatchRequest(params);
 }
 
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
index 5197b3a6e41f..e2f635778120 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
@@ -749,7 +749,12 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor
+        span.Attribute("event", ev->ToString());
+    }
+    params.Common.Span = std::move(span);
     return new TBlobStorageGroupPutRequest(params);
 }
 
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp
index 3132b366b051..6f4843037ec1 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_range.cpp
@@ -396,7 +396,8 @@ class TBlobStorageGroupRangeRequest : public TBlobStorageGroupRequestActor
                     .RestartCounter = ev->Get()->RestartCounter,
-                    .TraceId = std::move(ev->TraceId),
                     .Event = ev->Get(),
                     .ExecutionRelay = ev->Get()->ExecutionRelay,
                 }
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     } else {
@@ -104,7 +103,6 @@ namespace NKikimr {
                     .Now = TActivationContext::Now(),
                     .StoragePoolCounters = StoragePoolCounters,
                     .RestartCounter = ev->Get()->RestartCounter,
-                    .TraceId = std::move(ev->TraceId),
                     .Event = ev->Get(),
                     .ExecutionRelay = ev->Get()->ExecutionRelay,
                     .LogAccEnabled = ev->Get()->IsVerboseNoDataEnabled || ev->Get()->CollectDebugInfo,
@@ -112,7 +110,7 @@ namespace NKikimr {
                 },
                 .NodeLayout = TNodeLayoutInfoPtr(NodeLayoutInfo),
                 .AccelerationParams = GetAccelerationParams(),
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     } else {
@@ -228,7 +226,7 @@ namespace NKikimr {
                 .Stats = PerDiskStats,
                 .EnableRequestMod3x3ForMinLatency = enableRequestMod3x3ForMinLatency,
                 .AccelerationParams = GetAccelerationParams(),
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     }
@@ -248,11 +246,10 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay
                 }
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     }
@@ -277,12 +274,11 @@ namespace NKikimr {
                    .Now = now,
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay
                },
                .UseVPatch = static_cast<bool>(Controls.EnableVPatch.Update(now))
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     }
@@ -315,13 +311,12 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay,
                    .LogAccEnabled = (erasure != TBlobStorageGroupType::ErasureMirror3dc) &&
                        (erasure != TBlobStorageGroupType::ErasureMirror3of4)
                 }
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     }
@@ -345,11 +340,10 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay
                 }
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     }
@@ -390,11 +384,10 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay
                 }
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     }
@@ -419,11 +412,10 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay
                 }
-            }),
+            }, std::move(ev->TraceId)),
             TInstant::Max()
         );
     }
@@ -442,11 +434,10 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay
                 }
-            }),
+            }, std::move(ev->TraceId)),
             TInstant::Max()
         );
     }
@@ -493,7 +484,6 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay,
                    .LatencyQueueKind = kind,
@@ -502,7 +492,7 @@ namespace NKikimr {
                 .Stats = PerDiskStats,
                 .EnableRequestMod3x3ForMinLatency = enableRequestMod3x3ForMinLatency,
                 .AccelerationParams = GetAccelerationParams(),
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     } else {
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp
index 6bab0545cd51..2eb56e8c78e0 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_status.cpp
@@ -129,7 +129,8 @@ class TBlobStorageGroupStatusRequest : public TBlobStorageGroupRequestActor
-Span.name() : TString();
-    }
-
     static const TSpan Empty;
 
 private:
diff --git a/ydb/library/actors/wilson/wilson_trace.h b/ydb/library/actors/wilson/wilson_trace.h
index 4eee4dfa8eb7..65dade260da4 100644
--- a/ydb/library/actors/wilson/wilson_trace.h
+++ b/ydb/library/actors/wilson/wilson_trace.h
@@ -208,10 +208,6 @@ namespace NWilson {
             return Verbosity;
         }
 
-        ui32 GetTimeToLive() const {
-            return TimeToLive;
-        }
-
         const void *GetTraceIdPtr() const { return TraceId.data(); }
         static constexpr size_t GetTraceIdSize() { return sizeof(TTrace); }
         const void *GetSpanIdPtr() const { return &SpanId; }

From 86413a0f97f21e4b6d23c507020d094521b87aa2 Mon Sep 17 00:00:00 2001
From: Sergey Belyakov
Date: Tue, 19 Nov 2024 13:08:00 +0000
Subject: [PATCH 11/13] Merge mistakes

---
 ydb/core/blobstorage/dsproxy/dsproxy.h                   | 2 +-
 ydb/core/blobstorage/dsproxy/dsproxy_block.cpp           | 2 +-
 ydb/core/blobstorage/dsproxy/dsproxy_get.cpp             | 2 +-
 ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp | 2 +-
 ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp           | 2 +-
 ydb/core/blobstorage/dsproxy/dsproxy_put.cpp             | 2 +-
 ydb/core/blobstorage/dsproxy/dsproxy_request.cpp         | 7 ++-----
 7 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/ydb/core/blobstorage/dsproxy/dsproxy.h b/ydb/core/blobstorage/dsproxy/dsproxy.h
index 8169c564283b..1d839f3a48e4 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy.h
+++ b/ydb/core/blobstorage/dsproxy/dsproxy.h
@@ -718,7 +718,7 @@ struct TBlobStorageGroupMultiPutParameters {
         return maxRestarts;
     }
 };
-IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupMultiPutParameters params, NWilson::TTraceId traceId);
+IActor* CreateBlobStorageGroupPutRequest(TBlobStorageGroupMultiPutParameters params);
 
 struct TBlobStorageGroupGetParameters {
     TBlobStorageGroupRequestActor::TCommonParameters Common;
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp
index 9edbc3e81c3d..1a5da7b27629 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_block.cpp
@@ -172,7 +172,7 @@ class TBlobStorageGroupBlockRequest : public TBlobStorageGroupRequestActor
-        span.Attribute("event", ev->ToString());
+        span.Attribute("event", params.Common.Event->ToString());
     }
     params.Common.Span = std::move(span);
     return new TBlobStorageGroupBlockRequest(params);
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp
index 162abafd817f..eb3a0a7bb482 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp
@@ -468,7 +468,7 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor
-        span.Attribute("event", ev->ToString());
+        span.Attribute("event", params.Common.Event->ToString());
     }
     params.Common.Span = std::move(span);
     return new TBlobStorageGroupGetRequest(params);
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp
index 6e3c2e729a49..89e36c3c814f 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_indexrestoreget.cpp
@@ -391,7 +391,7 @@ class TBlobStorageGroupIndexRestoreGetRequest
 IActor* CreateBlobStorageGroupIndexRestoreGetRequest(TBlobStorageGroupRestoreGetParameters params, NWilson::TTraceId traceId) {
     NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.IndexRestoreGet");
     if (span) {
-        span.Attribute("event", ev->ToString());
+        span.Attribute("event", params.Common.Event->ToString());
     }
     params.Common.Span = std::move(span);
     return new TBlobStorageGroupIndexRestoreGetRequest(params);
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp
index 664a939233c0..642ec68d1475 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_patch.cpp
@@ -1041,7 +1041,7 @@ class TBlobStorageGroupPatchRequest : public TBlobStorageGroupRequestActor
-        span.Attribute("event", ev->ToString());
+        span.Attribute("event", params.Common.Event->ToString());
     }
     params.Common.Span = std::move(span);
     return new TBlobStorageGroupPatchRequest(params);
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
index e2f635778120..dcd0b53829e1 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
@@ -752,7 +752,7 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor
-        span.Attribute("event", ev->ToString());
+        span.Attribute("event", params.Common.Event->ToString());
     }
     params.Common.Span = std::move(span);
     return new TBlobStorageGroupPutRequest(params);
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp
index ff1e020fcba4..3b806162e7a3 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_request.cpp
@@ -126,12 +126,11 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay,
                    .LatencyQueueKind = kind,
                 },
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     }
@@ -217,7 +216,6 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay,
                    .LatencyQueueKind = kind
@@ -364,11 +362,10 @@ namespace NKikimr {
                    .Now = TActivationContext::Now(),
                    .StoragePoolCounters = StoragePoolCounters,
                    .RestartCounter = ev->Get()->RestartCounter,
-                   .TraceId = std::move(ev->TraceId),
                    .Event = ev->Get(),
                    .ExecutionRelay = ev->Get()->ExecutionRelay
                 }
-            }),
+            }, std::move(ev->TraceId)),
             ev->Get()->Deadline
         );
     } else {

From ea473b429eda375db2bf3f1364e7c122a22b4f6a Mon Sep 17 00:00:00 2001
From: Sergey Belyakov
Date: Tue, 19 Nov 2024 13:29:26 +0000
Subject: [PATCH 12/13] Fix typo

---
 ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h | 9 +++------
 ydb/core/cms/json_proxy_proto.h                       | 2 +-
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h b/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h
index 51e3b7b2dad8..74fcfe01ca57 100644
--- a/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h
+++ b/ydb/core/blobstorage/dsproxy/ut/dsproxy_env_mock_ut.h
@@ -120,7 +120,6 @@ struct TDSProxyEnv {
             .Now = TInstant::Now(),
             .StoragePoolCounters = StoragePoolCounters,
            .RestartCounter = ev->Get()->RestartCounter,
-           .TraceId = std::move(ev->TraceId),
            .Event = ev->Get(),
            .ExecutionRelay = ev->Get()->ExecutionRelay,
            .LatencyQueueKind = kind,
@@ -128,7 +127,7 @@ struct TDSProxyEnv {
            .TimeStatsEnabled = Mon->TimeStats.IsEnabled(),
            .Stats = PerDiskStatsPtr,
            .EnableRequestMod3x3ForMinLatency = false,
-        }));
+        }, std::move(ev->TraceId)));
     }
 
     std::unique_ptr CreatePutRequestActor(TBatchedVec &batched,
@@ -170,13 +169,12 @@
            .Now = TInstant::Now(),
            .StoragePoolCounters = StoragePoolCounters,
            .RestartCounter = ev->Get()->RestartCounter,
-           .TraceId = std::move(ev->TraceId),
            .Event = ev->Get(),
            .ExecutionRelay = ev->Get()->ExecutionRelay,
            .LatencyQueueKind = kind,
            },
            .NodeLayout = TNodeLayoutInfoPtr(NodeLayoutInfo)
-        }));
+        }, std::move(ev->TraceId)));
     }
 
     std::unique_ptr CreatePatchRequestActor(TEvBlobStorage::TEvPatch::TPtr &ev, bool useVPatch = false) {
@@ -191,12 +189,11 @@
            .Now = TInstant::Now(),
            .StoragePoolCounters = StoragePoolCounters,
            .RestartCounter = ev->Get()->RestartCounter,
-           .TraceId = std::move(ev->TraceId),
            .Event = ev->Get(),
            .ExecutionRelay = ev->Get()->ExecutionRelay
            },
            .UseVPatch = useVPatch
-        }));
+        }, std::move(ev->TraceId)));
     }
 };
 
diff --git a/ydb/core/cms/json_proxy_proto.h b/ydb/core/cms/json_proxy_proto.h
index 2d484c3febd0..ef9a98c380ae 100644
--- a/ydb/core/cms/json_proxy_proto.h
+++ b/ydb/core/cms/json_proxy_proto.h
@@ -79,7 +79,7 @@ class TJsonProxyProto : public TActorBootstrapped {
         else if (name == ".NKikimrConfig.TImmediateControlsConfig.TVDiskControls")
             return ReplyWithTypeDescription(*NKikimrConfig::TImmediateControlsConfig::TVDiskControls::descriptor(), ctx);
         else if (name == ".NKikimrConfig.TImmediateControlsConfig.TTabletControls")
-            return ReplyWithTypeDescription(*NKikimrConfig::TImmediateControlsConfig::TTabletControls::descriptor(), ctx
+            return ReplyWithTypeDescription(*NKikimrConfig::TImmediateControlsConfig::TTabletControls::descriptor(), ctx);
         else if (name == ".NKikimrConfig.TImmediateControlsConfig.TDSProxyControls")
             return ReplyWithTypeDescription(*NKikimrConfig::TImmediateControlsConfig::TDSProxyControls::descriptor(), ctx);
         else if (name == ".NKikimrConfig.TImmediateControlsConfig.TBlobStorageControllerControls")

From 439f9c3beb51a256b6159f2c6f1ac4ad700d507a Mon Sep 17 00:00:00 2001
From: Sergey Belyakov
Date: Wed, 20 Nov 2024 09:49:00 +0000
Subject: [PATCH 13/13] Fix UT counting events

---
 ydb/core/blobstorage/ut_blobstorage/counting_events.cpp | 6 +++++-
 ydb/core/blobstorage/ut_blobstorage/lib/env.h           | 3 ++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp b/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp
index e9de1c0fab97..e5b112e445b4 100644
--- a/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp
+++ b/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp
@@ -86,7 +86,11 @@ Y_UNIT_TEST_SUITE(CountingEvents) {
     void CountingEventsTest(TString typeOperation, ui32 eventsCount, TBlobStorageGroupType groupType) {
 
-        TEnvironmentSetup env(true, groupType);
+        TEnvironmentSetup env({
+            .VDiskReplPausedAtStart = true,
+            .Erasure = groupType,
+            .UseActorSystemTimeInBSQueue = false,
+        });
         auto& runtime = env.Runtime;
         env.CreateBoxAndPool();
diff --git a/ydb/core/blobstorage/ut_blobstorage/lib/env.h b/ydb/core/blobstorage/ut_blobstorage/lib/env.h
index 942a64e8b1e8..ad829ba64eaf 100644
--- a/ydb/core/blobstorage/ut_blobstorage/lib/env.h
+++ b/ydb/core/blobstorage/ut_blobstorage/lib/env.h
@@ -51,6 +51,7 @@ struct TEnvironmentSetup {
         const float SlowDiskThreshold = 2;
         const float VDiskPredictedDelayMultiplier = 1;
         const ui32 MaxNumOfSlowDisks = 2;
+        const bool UseActorSystemTimeInBSQueue = true;
     };
 
     const TSettings Settings;
@@ -366,7 +367,7 @@ struct TEnvironmentSetup {
         auto config = MakeIntrusive(new TMockPDiskServiceFactory(*this));
        config->BlobStorageConfig.MutableServiceSet()->AddAvailabilityDomains(DomainId);
        config->VDiskReplPausedAtStart = Settings.VDiskReplPausedAtStart;
-       config->UseActorSystemTimeInBSQueue = true;
+       config->UseActorSystemTimeInBSQueue = Settings.UseActorSystemTimeInBSQueue;
        if (Settings.ConfigPreprocessor) {
            Settings.ConfigPreprocessor(nodeId, *config);
        }
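
For readers following the series as a whole, the sketch below restates the request-construction pattern the patches converge on. It is illustrative only, not an applicable hunk: it reuses names that do appear in the hunks above (NWilson::TSpan, TWilson::BlobStorage, params.Common.Span, params.Common.Event, PushRequest), while the span name "DSProxy.Get" is assumed by analogy with the names visible elsewhere ("DSProxy.IndexRestoreGet", "DSProxy.CollectGarbage"), and the surrounding YDB types are taken as given rather than redefined here.

// Sketch of the final state after PATCH 11/13 and 13/13 (assumptions noted above).
// The factory, not the caller, owns span creation: the TraceId arrives as a
// separate argument instead of a TCommonParameters member, and the span
// attribute is taken from params rather than from a captured event pointer.
IActor* CreateBlobStorageGroupGetRequest(TBlobStorageGroupGetParameters params, NWilson::TTraceId traceId) {
    NWilson::TSpan span(TWilson::BlobStorage, std::move(traceId), "DSProxy.Get");
    if (span) {
        span.Attribute("event", params.Common.Event->ToString());
    }
    params.Common.Span = std::move(span);
    return new TBlobStorageGroupGetRequest(params);
}

// Caller side (dsproxy_request.cpp): the TraceId is moved out of the event and
// passed as the factory's second argument, e.g.
//   PushRequest(CreateBlobStorageGroupGetRequest({ .Common = { /* ... */ } },
//               std::move(ev->TraceId)), ev->Get()->Deadline);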