
Commit 6951d91

jsteemann authored and MongoDB Bot committed
Revert "SERVER-98650 Correctly handle StorageUnavailable exceptions during query restore (#32463)
GitOrigin-RevId: f099987179b0e9919aa2fcba25afe48f35e53ae9
1 parent be32470 commit 6951d91

16 files changed: +35 -173 lines changed

jstests/noPassthrough/storage_unavailable_exception_during_query_restore.js

Lines changed: 0 additions & 79 deletions
This file was deleted.

src/mongo/db/exec/batched_delete_stage.cpp

Lines changed: 4 additions & 19 deletions
@@ -55,7 +55,6 @@ namespace mongo {
 
 MONGO_FAIL_POINT_DEFINE(throwWriteConflictExceptionInBatchedDeleteStage);
 MONGO_FAIL_POINT_DEFINE(batchedDeleteStageSleepAfterNDocuments);
-MONGO_FAIL_POINT_DEFINE(batchedDeleteStageThrowTemporarilyUnavailableException);
 
 namespace {
 
@@ -229,22 +228,15 @@ PlanStage::StageState BatchedDeleteStage::doWork(WorkingSetID* out) {
         _commitStagedDeletes = _passStagingComplete || !_stagedDeletesBuffer.empty();
     }
 
-    // We need to check if 'planStateStage' is 'NEED_YIELD' earlier than we check 'isEOF()'.
-    // The reason is that 'isEOF()' returns true if we have completed staging and have an empty
-    // buffer of staged deletes. However, even if 'isEOF()' returns true, the 'planStateStage' can
-    // be 'NEED_YIELD' here if inside 'restoreState()' in '_deleteBatch()' a
-    // 'StorageEngineException' was thrown and caught. In this case, the 'planStateStage' is
-    // 'NEED_YIELD' and 'isEOF()' already returns true.
-    if (planStageState == PlanStage::NEED_YIELD) {
-        *out = idToReturn;
-        return PlanStage::NEED_YIELD;
-    }
-
     if (isEOF()) {
         invariant(planStageState != PlanStage::NEED_YIELD);
         return PlanStage::IS_EOF;
     }
 
+    if (planStageState == PlanStage::NEED_YIELD) {
+        *out = idToReturn;
+    }
+
     return planStageState;
 }
 
@@ -498,13 +490,6 @@ PlanStage::StageState BatchedDeleteStage::_tryRestoreState(WorkingSetID* out) {
         "BatchedDeleteStage::_tryRestoreState",
         collection()->ns().ns(),
         [&] {
-            if (MONGO_unlikely(
-                    batchedDeleteStageThrowTemporarilyUnavailableException.shouldFail())) {
-                throwTemporarilyUnavailableException(
-                    str::stream()
-                    << "Hit failpoint '"
-                    << batchedDeleteStageThrowTemporarilyUnavailableException.getName() << "'.");
-            }
             child()->restoreState(&collection());
             return PlanStage::NEED_TIME;
         },
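To see the control-flow difference in the doWork() hunk above in isolation, here is a minimal, self-contained C++ sketch; the enum and the two free functions are simplified stand-ins, not MongoDB source. The removed ordering surfaces the child's NEED_YIELD before considering EOF, so a restore failure with an empty delete buffer is still reported as NEED_YIELD; the restored ordering checks EOF first and asserts the child is not mid-yield at that point.

#include <cassert>

// Simplified stand-ins for the PlanStage state machine; not MongoDB source.
enum class StageState { NEED_TIME, NEED_YIELD, IS_EOF, ADVANCED };

// Ordering used by the code removed in this commit: a NEED_YIELD from the child
// (for example after a failed restore) is surfaced before EOF is considered.
StageState resolveRemovedOrdering(StageState childState, bool eof) {
    if (childState == StageState::NEED_YIELD) {
        return StageState::NEED_YIELD;
    }
    if (eof) {
        return StageState::IS_EOF;
    }
    return childState;
}

// Ordering restored by this revert: EOF wins, and the code asserts that the
// child is never left in NEED_YIELD once EOF has been reached.
StageState resolveRestoredOrdering(StageState childState, bool eof) {
    if (eof) {
        assert(childState != StageState::NEED_YIELD);
        return StageState::IS_EOF;
    }
    return childState;  // NEED_YIELD and other states pass straight through
}

int main() {
    // The case motivating the reverted change: restore failed and the buffer is empty.
    assert(resolveRemovedOrdering(StageState::NEED_YIELD, /*eof=*/true) == StageState::NEED_YIELD);
    // Ordinary paths behave the same under both orderings.
    assert(resolveRemovedOrdering(StageState::NEED_TIME, /*eof=*/false) == StageState::NEED_TIME);
    assert(resolveRestoredOrdering(StageState::NEED_TIME, /*eof=*/true) == StageState::IS_EOF);
    return 0;
}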

src/mongo/db/exec/cached_plan.cpp

Lines changed: 1 addition & 2 deletions
@@ -194,8 +194,7 @@ Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
     // In all cases, the actual yielding happens here.
     if (yieldPolicy->shouldYieldOrInterrupt(expCtx()->opCtx)) {
         // Here's where we yield.
-        return yieldPolicy->yieldOrInterrupt(
-            expCtx()->opCtx, nullptr, RestoreContext::RestoreType::kYield);
+        return yieldPolicy->yieldOrInterrupt(expCtx()->opCtx);
     }
 
     return Status::OK();

src/mongo/db/exec/index_scan.cpp

Lines changed: 1 addition & 9 deletions
@@ -60,8 +60,6 @@ int sgn(int i) {
 
 namespace mongo {
 
-MONGO_FAIL_POINT_DEFINE(throwDuringIndexScanRestore);
-
 // static
 const char* IndexScan::kStageType = "IXSCAN";
 
@@ -286,14 +284,8 @@ void IndexScan::doSaveStateRequiresIndex() {
 }
 
 void IndexScan::doRestoreStateRequiresIndex() {
-    if (_indexCursor) {
-        if (MONGO_unlikely(throwDuringIndexScanRestore.shouldFail())) {
-            throwTemporarilyUnavailableException(str::stream()
-                                                 << "Hit failpoint '"
-                                                 << throwDuringIndexScanRestore.getName() << "'.");
-        }
+    if (_indexCursor)
         _indexCursor->restore();
-    }
 }
 
 void IndexScan::doDetachFromOperationContext() {

src/mongo/db/exec/multi_plan.cpp

Lines changed: 1 addition & 2 deletions
@@ -189,8 +189,7 @@ void MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
     // 3) we need to yield and retry due to a WriteConflictException.
     // In all cases, the actual yielding happens here.
    if (yieldPolicy->shouldYieldOrInterrupt(expCtx()->opCtx)) {
-        uassertStatusOK(yieldPolicy->yieldOrInterrupt(
-            expCtx()->opCtx, nullptr, RestoreContext::RestoreType::kYield));
+        uassertStatusOK(yieldPolicy->yieldOrInterrupt(expCtx()->opCtx));
     }
 }

src/mongo/db/exec/sbe/stages/stages.h

Lines changed: 1 addition & 2 deletions
@@ -521,8 +521,7 @@ class CanInterrupt {
                 opCtx->checkForInterrupt();
             }
         } else if (_yieldPolicy->shouldYieldOrInterrupt(opCtx)) {
-            uassertStatusOK(_yieldPolicy->yieldOrInterrupt(
-                opCtx, nullptr, RestoreContext::RestoreType::kYield));
+            uassertStatusOK(_yieldPolicy->yieldOrInterrupt(opCtx));
         }
     }

src/mongo/db/exec/trial_stage.cpp

Lines changed: 1 addition & 2 deletions
@@ -83,8 +83,7 @@ Status TrialStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
             throwWriteConflictException(
                 "Write conflict during TrialStage plan selection and yielding is disabled.");
         }
-        auto yieldStatus = yieldPolicy->yieldOrInterrupt(
-            expCtx()->opCtx, nullptr, RestoreContext::RestoreType::kYield);
+        auto yieldStatus = yieldPolicy->yieldOrInterrupt(expCtx()->opCtx);
         if (!yieldStatus.isOK()) {
             return yieldStatus;
         }

src/mongo/db/query/mock_yield_policies.h

Lines changed: 4 additions & 12 deletions
@@ -49,9 +49,7 @@ class MockYieldPolicy : public PlanYieldPolicy {
         MONGO_UNREACHABLE;
     }
 
-    void restoreState(OperationContext* opCtx,
-                      const Yieldable* yieldable,
-                      RestoreContext::RestoreType restoreType) final {
+    void restoreState(OperationContext* opCtx, const Yieldable* yieldable) override final {
         MONGO_UNREACHABLE;
     }
 };
@@ -69,9 +67,7 @@ class AlwaysTimeOutYieldPolicy final : public MockYieldPolicy {
         return true;
     }
 
-    Status yieldOrInterrupt(OperationContext*,
-                            std::function<void()> whileYieldingFn,
-                            RestoreContext::RestoreType restoreType) override {
+    Status yieldOrInterrupt(OperationContext*, std::function<void()> whileYieldingFn) override {
         return {ErrorCodes::ExceededTimeLimit, "Using AlwaysTimeOutYieldPolicy"};
     }
 };
@@ -89,9 +85,7 @@ class AlwaysPlanKilledYieldPolicy final : public MockYieldPolicy {
         return true;
     }
 
-    Status yieldOrInterrupt(OperationContext*,
-                            std::function<void()> whileYieldingFn,
-                            RestoreContext::RestoreType restoreType) override {
+    Status yieldOrInterrupt(OperationContext*, std::function<void()> whileYieldingFn) override {
         return {ErrorCodes::QueryPlanKilled, "Using AlwaysPlanKilledYieldPolicy"};
     }
 };
@@ -109,9 +103,7 @@ class NoopYieldPolicy final : public MockYieldPolicy {
         return false;
     }
 
-    Status yieldOrInterrupt(OperationContext*,
-                            std::function<void()> whileYieldingFn,
-                            RestoreContext::RestoreType restoreType) override {
+    Status yieldOrInterrupt(OperationContext*, std::function<void()> whileYieldingFn) override {
        MONGO_UNREACHABLE;
    }
 };
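The mock policies above show the signature this revert returns to: yieldOrInterrupt() no longer takes a RestoreContext::RestoreType, only the OperationContext and an optional callback to run while yielded. A minimal, compilable sketch of the resulting call shapes, using simplified stand-in types (OperationContext, Status, and the free yieldOrInterrupt below are illustrative only, not the real MongoDB declarations):

#include <functional>
#include <iostream>

// Illustrative stand-ins only; the real types live in the MongoDB source tree.
struct OperationContext {};
struct Status {
    bool ok;
    const char* reason;
};

// Post-revert shape seen throughout this commit: the RestoreContext::RestoreType
// argument is gone, leaving the OperationContext and an optional callback that
// runs while the plan is yielded.
Status yieldOrInterrupt(OperationContext* opCtx,
                        std::function<void()> whileYieldingFn = nullptr) {
    if (whileYieldingFn) {
        whileYieldingFn();
    }
    return {true, ""};
}

int main() {
    OperationContext opCtx;
    // Call sites like CachedPlanStage::tryYield now pass only the opCtx...
    Status s1 = yieldOrInterrupt(&opCtx);
    // ...while callers such as waitForInserts still supply a whileYielding callback.
    Status s2 = yieldOrInterrupt(&opCtx, [] { std::cout << "waiting while yielded\n"; });
    return (s1.ok && s2.ok) ? 0 : 1;
}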

src/mongo/db/query/plan_executor_impl.cpp

Lines changed: 2 additions & 4 deletions
@@ -265,8 +265,7 @@ void PlanExecutorImpl::restoreState(const RestoreContext& context) {
             throw;
 
         // Handles retries by calling restoreStateWithoutRetrying() in a loop.
-        uassertStatusOK(_yieldPolicy->yieldOrInterrupt(
-            getOpCtx(), nullptr /* whileYieldingFn */, context.type()));
+        uassertStatusOK(_yieldPolicy->yieldOrInterrupt(getOpCtx()));
     }
 }
 
@@ -384,8 +383,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<Document>* ob
     };
 
     if (_yieldPolicy->shouldYieldOrInterrupt(_opCtx)) {
-        uassertStatusOK(_yieldPolicy->yieldOrInterrupt(
-            _opCtx, whileYieldingFn, RestoreContext::RestoreType::kYield));
+        uassertStatusOK(_yieldPolicy->yieldOrInterrupt(_opCtx, whileYieldingFn));
     }
 
     WorkingSetID id = WorkingSet::INVALID_ID;
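The comment retained in the first hunk above notes that yieldOrInterrupt() handles retries by calling restoreStateWithoutRetrying() in a loop. A minimal sketch of that general retry-in-a-loop pattern follows; restoreWithRetry, restoreOnce, and isTransient are hypothetical names used purely for illustration and are not part of the MongoDB tree.

#include <exception>
#include <functional>
#include <stdexcept>

// Sketch of a "retry the restore in a loop" pattern with hypothetical helpers;
// not the actual MongoDB implementation.
void restoreWithRetry(const std::function<void()>& restoreOnce,
                      const std::function<bool(const std::exception&)>& isTransient) {
    while (true) {
        try {
            restoreOnce();  // e.g. one attempt corresponding to restoreStateWithoutRetrying()
            return;         // restore succeeded
        } catch (const std::exception& ex) {
            if (!isTransient(ex)) {
                throw;  // non-retryable errors propagate to the caller
            }
            // A transient failure falls through; a real implementation would yield
            // or back off here before the next attempt.
        }
    }
}

int main() {
    int attempts = 0;
    restoreWithRetry(
        [&] {
            if (++attempts < 3) {
                throw std::runtime_error("transient");
            }
        },
        [](const std::exception&) { return true; });
    return attempts == 3 ? 0 : 1;
}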

src/mongo/db/query/plan_insert_listener.cpp

Lines changed: 10 additions & 14 deletions
@@ -114,20 +114,16 @@ void waitForInserts(OperationContext* opCtx,
     ON_BLOCK_EXIT([curOp] { curOp->resumeTimer(); });
 
     notifier->prepareForWait(opCtx);
-    auto yieldResult = yieldPolicy->yieldOrInterrupt(
-        opCtx,
-        [opCtx, &notifier] {
-            const auto deadline = awaitDataState(opCtx).waitForInsertsDeadline;
-            notifier->waitUntil(opCtx, deadline);
-            if (MONGO_unlikely(planExecutorHangWhileYieldedInWaitForInserts.shouldFail())) {
-                LOGV2(4452903,
-                      "PlanExecutor - planExecutorHangWhileYieldedInWaitForInserts fail point "
-                      "enabled. "
-                      "Blocking until fail point is disabled");
-                planExecutorHangWhileYieldedInWaitForInserts.pauseWhileSet();
-            }
-        },
-        RestoreContext::RestoreType::kYield);
+    auto yieldResult = yieldPolicy->yieldOrInterrupt(opCtx, [opCtx, &notifier] {
+        const auto deadline = awaitDataState(opCtx).waitForInsertsDeadline;
+        notifier->waitUntil(opCtx, deadline);
+        if (MONGO_unlikely(planExecutorHangWhileYieldedInWaitForInserts.shouldFail())) {
+            LOGV2(4452903,
+                  "PlanExecutor - planExecutorHangWhileYieldedInWaitForInserts fail point enabled. "
+                  "Blocking until fail point is disabled");
+            planExecutorHangWhileYieldedInWaitForInserts.pauseWhileSet();
+        }
+    });
     notifier->doneWaiting(opCtx);
 
     uassertStatusOK(yieldResult);
