[enhancement] Skip tests if their dependencies fail #3500

Merged 2 commits on Jun 12, 2025
7 changes: 7 additions & 0 deletions docs/manpage.rst
@@ -1633,6 +1633,13 @@ The following list summarizes the schema changes (version numbers refer to schem

Every reference tuple of ``.[].runs[].testcases[].perfvalues`` now has an additional element at the end, denoting the result (``pass`` or ``fail``) for the corresponding performance variable.

.. admonition:: 4.2

A new test ``result`` value is introduced, ``fail_deps``, denoting that a test was skipped because one or more of its dependencies failed.

Since ReFrame 4.9, if a test's dependencies fail, the test is skipped and placed in the ``fail_deps`` state; previously, this was treated as a normal failure.
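
For illustration only (this snippet is not part of the diff), here is a minimal sketch of a dependency chain in which the parent test fails; with this change, the dependent test is reported as ``fail_deps`` instead of as a plain failure. The test names and bodies are hypothetical and assume ReFrame's standard dependency API (``depends_on``):

.. code-block:: python

   import reframe as rfm
   import reframe.utility.sanity as sn


   @rfm.simple_test
   class ParentTest(rfm.RunOnlyRegressionTest):
       valid_systems = ['*']
       valid_prog_environs = ['*']
       executable = 'echo'

       @sanity_function
       def validate(self):
           # Force a sanity failure for the sake of the example
           return sn.assert_true(False, 'forced failure')


   @rfm.simple_test
   class ChildTest(rfm.RunOnlyRegressionTest):
       valid_systems = ['*']
       valid_prog_environs = ['*']
       executable = 'echo'

       @run_after('init')
       def set_deps(self):
           # When ParentTest fails, ChildTest is skipped and its result
           # is reported as 'fail_deps' rather than 'fail'.
           self.depends_on('ParentTest')

       @sanity_function
       def validate(self):
           return sn.assert_true(True)

Running such a pair with ``reframe -r`` would then be expected to show ``ParentTest`` as failed and ``ChildTest`` with result ``fail_deps`` in the run report.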


Environment
===========
6 changes: 0 additions & 6 deletions reframe/core/exceptions.py
@@ -94,12 +94,6 @@ class TaskExit(ReframeError):
'''Raised when a regression task must exit the pipeline prematurely.'''


class TaskDependencyError(ReframeError):
'''Raised inside a regression task by the runtime when one of its
dependencies has failed.
'''


class FailureLimitError(ReframeError):
'''Raised when the limit of test failures has been reached.'''

2 changes: 1 addition & 1 deletion reframe/frontend/cli.py
@@ -1352,7 +1352,7 @@ def _case_failed(t):
if not rec:
return False

return rec['result'] == 'fail' or rec['result'] == 'abort'
return rec['result'] in {'fail', 'fail_deps', 'abort'}

testcases = list(filter(_case_failed, testcases))
printer.verbose(
30 changes: 27 additions & 3 deletions reframe/frontend/executors/__init__.py
@@ -203,6 +203,7 @@ def __init__(self, case, listeners=None, timeout=None):
self._exc_info = (None, None, None)
self._listeners = listeners or []
self._skipped = False
self._failed_deps = False

# Reference count for dependent tests; safe to cleanup the test only
# if it is zero
@@ -293,6 +294,10 @@ def failed(self):
return (self._failed_stage is not None and
not self._aborted and not self._skipped)

@property
def failed_deps(self):
return self._failed_deps

@property
def state(self):
if self.failed:
@@ -343,7 +348,10 @@ def result(self):
elif self.aborted:
return 'abort'
elif self.skipped:
return 'skip'
if self.failed_deps:
return 'fail_deps'
else:
return 'skip'
else:
return '<unknown>'

@@ -511,6 +519,18 @@ def skip(self, exc_info=None):
self._exc_info = exc_info or sys.exc_info()
self._notify_listeners('on_task_skip')

def skip_from_deps(self):
self.do_skip('dependencies failed')
self._failed_deps = True

def do_skip(self, message):
try:
# We raise the SkipTestError here and catch it immediately in
# order for `skip()` to get the correct exception context.
raise SkipTestError(message)
except SkipTestError:
self.skip()

def abort(self, cause=None):
if self.failed or self._aborted:
return
@@ -685,8 +705,12 @@ def runall(self, testcases, restored_cases=None):
self._printer.timestamp('Finished on', 'short double line')

def _retry_failed(self, cases):
def _failed_or_deps():
return self._stats.failed() + [t for t in self._stats.skipped()
if t.failed_deps]

rt = runtime.runtime()
failures = self._stats.failed()
failures = _failed_or_deps()
while (failures and rt.current_run < self._max_retries):
num_failed_checks = len({tc.check.unique_name for tc in failures})
rt.next_run()
@@ -702,7 +726,7 @@ def _retry_failed(self, cases):
cases_graph, _ = dependencies.build_deps(failed_cases, cases)
failed_cases = dependencies.toposort(cases_graph, is_subgraph=True)
self._runall(failed_cases)
failures = self._stats.failed()
failures = _failed_or_deps()

def _runall(self, testcases):
def print_separator(check, prefix):
27 changes: 8 additions & 19 deletions reframe/frontend/executors/policies.py
@@ -12,8 +12,6 @@
import reframe.utility as util
from reframe.core.exceptions import (FailureLimitError,
RunSessionTimeout,
SkipTestError,
TaskDependencyError,
TaskExit)
from reframe.core.logging import getlogger, level_from_str
from reframe.core.pipeline import (CompileOnlyRegressionTest,
@@ -113,18 +111,13 @@ def runcase(self, case):
# NOTE: Restored dependencies are not in the task_index
if any(self._task_index[c].failed
for c in case.deps if c in self._task_index):
raise TaskDependencyError('dependencies failed')
task.skip_from_deps()
raise TaskExit

if any(self._task_index[c].skipped
for c in case.deps if c in self._task_index):

# We raise the SkipTestError here and catch it immediately in
# order for `skip()` to get the correct exception context.
try:
raise SkipTestError('skipped due to skipped dependencies')
except SkipTestError as e:
task.skip()
raise TaskExit from e
task.do_skip('skipped due to skipped dependencies')
raise TaskExit

task.setup(task.testcase.partition,
task.testcase.environ,
@@ -449,12 +442,9 @@ def _advance_all(self, tasks, timeout=None):

def _advance_startup(self, task):
if self.deps_skipped(task):
try:
raise SkipTestError('skipped due to skipped dependencies')
except SkipTestError as e:
task.skip()
self._current_tasks.remove(task)
return 1
task.do_skip('skipped due to skipped dependencies')
self._current_tasks.remove(task)
return 1
elif self.deps_succeeded(task):
try:
if task.check.is_dry_run():
@@ -479,8 +469,7 @@

return 1
elif self.deps_failed(task):
exc = TaskDependencyError('dependencies failed')
task.fail((type(exc), exc, None))
task.skip_from_deps()
self._current_tasks.remove(task)
return 1
else:
6 changes: 5 additions & 1 deletion reframe/frontend/printer.py
@@ -176,7 +176,7 @@ def _print_failure_info(rec, runid, total_runs):
continue

for r in run_info['testcases']:
if r['result'] in {'pass', 'abort', 'skip'}:
if r['result'] in {'pass', 'abort', 'skip', 'fail_deps'}:
continue

_print_failure_info(r, run_no, len(report['runs']))
@@ -248,6 +248,10 @@ def retry_report(self, report):
for run in reversed(report['runs'][1:]):
runidx = run['run_index']
for tc in run['testcases']:
if tc['result'] == 'fail_deps':
# Ignore tests that were skipped due to failed deps
continue

# Overwrite entry from previous run if available
tc_info = format_testcase_from_json(tc)
if tc_info not in retried_tc:
2 changes: 1 addition & 1 deletion reframe/frontend/reporting/__init__.py
@@ -32,7 +32,7 @@
# The schema data version
# Major version bumps are expected to break the validation of previous schemas

DATA_VERSION = '4.1'
DATA_VERSION = '4.2'
_SCHEMA = None
_RESERVED_SESSION_INFO_KEYS = None
_DATETIME_FMT = r'%Y%m%dT%H%M%S%z'
2 changes: 1 addition & 1 deletion unittests/test_cli.py
@@ -172,7 +172,7 @@ def test_check_restore_session_failed(run_reframe, tmp_path):
)
assert set(report.slice('name', when=('fail_phase', 'sanity'))) == {'T2'}
assert set(report.slice('name',
when=('fail_phase', 'startup'))) == {'T7', 'T9'}
when=('result', 'fail_deps'))) == {'T7', 'T9'}
assert set(report.slice('name', when=('fail_phase', 'setup'))) == {'T8'}
assert report['runs'][-1]['num_cases'] == 4

17 changes: 6 additions & 11 deletions unittests/test_policies.py
@@ -18,8 +18,7 @@
from reframe.core.exceptions import (AbortTaskError,
FailureLimitError,
ForceExitError,
RunSessionTimeout,
TaskDependencyError)
RunSessionTimeout)
from unittests.resources.checks.hellocheck import HelloTest
from unittests.resources.checks.frontend_checks import (
BadSetupCheck,
@@ -353,17 +352,13 @@ def assert_dependency_run(runner):
assert_runall(runner)
stats = runner.stats
assert 10 == stats.num_cases(0)
assert 4 == len(stats.failed())
for tf in stats.failed():
check = tf.testcase.check
_, exc_value, _ = tf.exc_info
if check.name == 'T7' or check.name == 'T9':
assert isinstance(exc_value, TaskDependencyError)
assert 2 == len(stats.failed())
assert 2 == len(stats.skipped())

# Check that cleanup is executed properly for successful tests as well
for t in stats.tasks():
check = t.testcase.check
if t.failed:
if t.failed or t.skipped:
continue

if t.ref_count == 0:
@@ -499,7 +494,7 @@ def test_concurrency_unlimited(make_async_runner, make_cases,
# assert begin_stamps[-1] <= end_stamps[0]
#
if begin_stamps[-1] > end_stamps[0]:
pytest.skip('the system seems too much loaded.')
pytest.skip('the system seems too loaded')


def test_concurrency_limited(make_async_runner, make_cases,
@@ -543,7 +538,7 @@ def test_concurrency_limited(make_async_runner, make_cases,
# corresponding strict check would be:
# self.assertTrue(self.begin_stamps[max_jobs-1] <= self.end_stamps[0])
if begin_stamps[max_jobs-1] > end_stamps[0]:
pytest.skip('the system seems too loaded.')
pytest.skip('the system seems too loaded')


def test_concurrency_none(make_async_runner, make_cases,