diff --git a/docs/manpage.rst b/docs/manpage.rst
index d57ab6244..1fe4d5e66 100644
--- a/docs/manpage.rst
+++ b/docs/manpage.rst
@@ -1633,6 +1633,13 @@ The following list summarizes the schema changes (version numbers refer to schem
 
       Every reference tuple of ``.[].runs[].testcases[].perfvalues`` has now an additional element at the end, denoting the result (``pass`` or ``fail``) for the corresponding performance variable.
 
+   .. admonition:: 4.2
+
+      A new test ``result`` value is introduced: ``fail_deps``, denoting that the test was skipped because one or more of its dependencies failed.
+
+      Since ReFrame 4.9, if a test's dependencies fail, the test is skipped and placed in the ``fail_deps`` state.
+      Previously, such tests were treated as normal failures.
+
 
 Environment
 ===========
diff --git a/reframe/core/exceptions.py b/reframe/core/exceptions.py
index 257bc5a2b..fbcec3794 100644
--- a/reframe/core/exceptions.py
+++ b/reframe/core/exceptions.py
@@ -94,12 +94,6 @@ class TaskExit(ReframeError):
     '''Raised when a regression task must exit the pipeline prematurely.'''
 
 
-class TaskDependencyError(ReframeError):
-    '''Raised inside a regression task by the runtime when one of its
-    dependencies has failed.
-    '''
-
-
 class FailureLimitError(ReframeError):
     '''Raised when the limit of test failures has been reached.'''
 
diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py
index 9ee2b81e7..ef9efb402 100644
--- a/reframe/frontend/cli.py
+++ b/reframe/frontend/cli.py
@@ -1352,7 +1352,7 @@ def _case_failed(t):
                 if not rec:
                     return False
 
-                return rec['result'] == 'fail' or rec['result'] == 'abort'
+                return rec['result'] in {'fail', 'fail_deps', 'abort'}
 
             testcases = list(filter(_case_failed, testcases))
             printer.verbose(
diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py
index 8199369c8..5b14da536 100644
--- a/reframe/frontend/executors/__init__.py
+++ b/reframe/frontend/executors/__init__.py
@@ -203,6 +203,7 @@ def __init__(self, case, listeners=None, timeout=None):
         self._exc_info = (None, None, None)
         self._listeners = listeners or []
         self._skipped = False
+        self._failed_deps = False
 
         # Reference count for dependent tests; safe to cleanup the test only
         # if it is zero
@@ -293,6 +294,10 @@ def failed(self):
         return (self._failed_stage is not None and
                 not self._aborted and not self._skipped)
 
+    @property
+    def failed_deps(self):
+        return self._failed_deps
+
     @property
     def state(self):
         if self.failed:
@@ -343,7 +348,10 @@ def result(self):
         elif self.aborted:
             return 'abort'
         elif self.skipped:
-            return 'skip'
+            if self.failed_deps:
+                return 'fail_deps'
+            else:
+                return 'skip'
         else:
             return ''
 
@@ -511,6 +519,18 @@ def skip(self, exc_info=None):
         self._exc_info = exc_info or sys.exc_info()
         self._notify_listeners('on_task_skip')
 
+    def skip_from_deps(self):
+        self.do_skip('dependencies failed')
+        self._failed_deps = True
+
+    def do_skip(self, message):
+        try:
+            # We raise the SkipTestError here and catch it immediately in
+            # order for `skip()` to get the correct exception context.
+            raise SkipTestError(message)
+        except SkipTestError:
+            self.skip()
+
     def abort(self, cause=None):
         if self.failed or self._aborted:
             return
@@ -685,8 +705,12 @@ def runall(self, testcases, restored_cases=None):
         self._printer.timestamp('Finished on', 'short double line')
 
     def _retry_failed(self, cases):
+        def _failed_or_deps():
+            return self._stats.failed() + [t for t in self._stats.skipped()
+                                           if t.failed_deps]
+
         rt = runtime.runtime()
-        failures = self._stats.failed()
+        failures = _failed_or_deps()
         while (failures and rt.current_run < self._max_retries):
             num_failed_checks = len({tc.check.unique_name for tc in failures})
             rt.next_run()
@@ -702,7 +726,7 @@ def _retry_failed(self, cases):
             cases_graph, _ = dependencies.build_deps(failed_cases, cases)
             failed_cases = dependencies.toposort(cases_graph, is_subgraph=True)
             self._runall(failed_cases)
-            failures = self._stats.failed()
+            failures = _failed_or_deps()
 
     def _runall(self, testcases):
         def print_separator(check, prefix):
diff --git a/reframe/frontend/executors/policies.py b/reframe/frontend/executors/policies.py
index da31264e6..f339f8d73 100644
--- a/reframe/frontend/executors/policies.py
+++ b/reframe/frontend/executors/policies.py
@@ -12,8 +12,6 @@
 import reframe.utility as util
 from reframe.core.exceptions import (FailureLimitError,
                                      RunSessionTimeout,
-                                     SkipTestError,
-                                     TaskDependencyError,
                                      TaskExit)
 from reframe.core.logging import getlogger, level_from_str
 from reframe.core.pipeline import (CompileOnlyRegressionTest,
@@ -113,18 +111,13 @@ def runcase(self, case):
             # NOTE: Restored dependencies are not in the task_index
             if any(self._task_index[c].failed
                    for c in case.deps if c in self._task_index):
-                raise TaskDependencyError('dependencies failed')
+                task.skip_from_deps()
+                raise TaskExit
 
             if any(self._task_index[c].skipped
                    for c in case.deps if c in self._task_index):
-
-                # We raise the SkipTestError here and catch it immediately in
-                # order for `skip()` to get the correct exception context.
-                try:
-                    raise SkipTestError('skipped due to skipped dependencies')
-                except SkipTestError as e:
-                    task.skip()
-                    raise TaskExit from e
+                task.do_skip('skipped due to skipped dependencies')
+                raise TaskExit
 
             task.setup(task.testcase.partition,
                        task.testcase.environ,
@@ -449,12 +442,9 @@ def _advance_all(self, tasks, timeout=None):
 
     def _advance_startup(self, task):
         if self.deps_skipped(task):
-            try:
-                raise SkipTestError('skipped due to skipped dependencies')
-            except SkipTestError as e:
-                task.skip()
-                self._current_tasks.remove(task)
-                return 1
+            task.do_skip('skipped due to skipped dependencies')
+            self._current_tasks.remove(task)
+            return 1
         elif self.deps_succeeded(task):
             try:
                 if task.check.is_dry_run():
@@ -479,8 +469,7 @@
             return 1
 
         elif self.deps_failed(task):
-            exc = TaskDependencyError('dependencies failed')
-            task.fail((type(exc), exc, None))
+            task.skip_from_deps()
             self._current_tasks.remove(task)
             return 1
         else:
diff --git a/reframe/frontend/printer.py b/reframe/frontend/printer.py
index e68d28b5e..269870346 100644
--- a/reframe/frontend/printer.py
+++ b/reframe/frontend/printer.py
@@ -176,7 +176,7 @@ def _print_failure_info(rec, runid, total_runs):
                 continue
 
             for r in run_info['testcases']:
-                if r['result'] in {'pass', 'abort', 'skip'}:
+                if r['result'] in {'pass', 'abort', 'skip', 'fail_deps'}:
                     continue
 
                 _print_failure_info(r, run_no, len(report['runs']))
@@ -248,6 +248,10 @@ def retry_report(self, report):
         for run in reversed(report['runs'][1:]):
             runidx = run['run_index']
             for tc in run['testcases']:
+                if tc['result'] == 'fail_deps':
+                    # Ignore tests that were skipped due to failed deps
+                    continue
+
                 # Overwrite entry from previous run if available
                 tc_info = format_testcase_from_json(tc)
                 if tc_info not in retried_tc:
diff --git a/reframe/frontend/reporting/__init__.py b/reframe/frontend/reporting/__init__.py
index a2d1b05bd..bfb8ec4fd 100644
--- a/reframe/frontend/reporting/__init__.py
+++ b/reframe/frontend/reporting/__init__.py
@@ -32,7 +32,7 @@
 
 # The schema data version
 # Major version bumps are expected to break the validation of previous schemas
-DATA_VERSION = '4.1'
+DATA_VERSION = '4.2'
 _SCHEMA = None
 _RESERVED_SESSION_INFO_KEYS = None
 _DATETIME_FMT = r'%Y%m%dT%H%M%S%z'
diff --git a/unittests/test_cli.py b/unittests/test_cli.py
index 6598bdd8f..231cfc809 100644
--- a/unittests/test_cli.py
+++ b/unittests/test_cli.py
@@ -172,7 +172,7 @@ def test_check_restore_session_failed(run_reframe, tmp_path):
     )
     assert set(report.slice('name', when=('fail_phase', 'sanity'))) == {'T2'}
     assert set(report.slice('name',
-                            when=('fail_phase', 'startup'))) == {'T7', 'T9'}
+                            when=('result', 'fail_deps'))) == {'T7', 'T9'}
     assert set(report.slice('name', when=('fail_phase', 'setup'))) == {'T8'}
     assert report['runs'][-1]['num_cases'] == 4
 
diff --git a/unittests/test_policies.py b/unittests/test_policies.py
index 19ac55960..6e2cb566a 100644
--- a/unittests/test_policies.py
+++ b/unittests/test_policies.py
@@ -18,8 +18,7 @@
 from reframe.core.exceptions import (AbortTaskError,
                                      FailureLimitError,
                                      ForceExitError,
-                                     RunSessionTimeout,
-                                     TaskDependencyError)
+                                     RunSessionTimeout)
 from unittests.resources.checks.hellocheck import HelloTest
 from unittests.resources.checks.frontend_checks import (
     BadSetupCheck,
@@ -353,17 +352,13 @@ def assert_dependency_run(runner):
     assert_runall(runner)
     stats = runner.stats
     assert 10 == stats.num_cases(0)
-    assert 4 == len(stats.failed())
-    for tf in stats.failed():
-        check = tf.testcase.check
-        _, exc_value, _ = tf.exc_info
-        if check.name == 'T7' or check.name == 'T9':
-            assert isinstance(exc_value, TaskDependencyError)
+    assert 2 == len(stats.failed())
+    assert 2 == len(stats.skipped())
 
     # Check that cleanup is executed properly for successful tests as well
     for t in stats.tasks():
         check = t.testcase.check
-        if t.failed:
+        if t.failed or t.skipped:
             continue
 
         if t.ref_count == 0:
@@ -499,7 +494,7 @@ def test_concurrency_unlimited(make_async_runner, make_cases,
     #     assert begin_stamps[-1] <= end_stamps[0]
    #
     if begin_stamps[-1] > end_stamps[0]:
-        pytest.skip('the system seems too much loaded.')
+        pytest.skip('the system seems too loaded')
 
 
 def test_concurrency_limited(make_async_runner, make_cases,
@@ -543,7 +538,7 @@ def test_concurrency_limited(make_async_runner, make_cases,
     # corresponding strict check would be:
     #     self.assertTrue(self.begin_stamps[max_jobs-1] <= self.end_stamps[0])
     if begin_stamps[max_jobs-1] > end_stamps[0]:
-        pytest.skip('the system seems too loaded.')
+        pytest.skip('the system seems too loaded')
 
 
 def test_concurrency_none(make_async_runner, make_cases,
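
For reference, a minimal sketch (not part of the patch) of how a report post-processing script might treat the new ``fail_deps`` value. It uses only the ``runs[].testcases[].result`` fields that appear in the hunks above; the report file name is a placeholder.

    import json
    from collections import Counter

    # Load a ReFrame run report (path is hypothetical).
    with open('report.json') as fp:
        report = json.load(fp)

    # Tally results across all runs.  'fail_deps' marks test cases that were
    # skipped because a dependency failed; they are retried together with the
    # real failures but are not counted as failures themselves.
    results = Counter(tc['result']
                      for run in report['runs']
                      for tc in run['testcases'])

    print('failures:', results['fail'] + results['abort'])
    print('skipped due to failed dependencies:', results['fail_deps'])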