@@ -1,6 +1,6 @@
-#! /usr/bin/env python3
+#!/usr/bin/env python3
 """
- Copyright (C) 2023 Intel Corporation
+ Copyright (C) 2024 Intel Corporation

  Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
  See LICENSE.TXT
@@ -11,68 +11,171 @@
 # The match files contain tests that are expected to fail.

 import os
-import shlex
 import sys
-from argparse import ArgumentParser
+import argparse
 import subprocess  # nosec B404
-import signal
-import re
-from collections import OrderedDict


-def _print_cmdline(cmd_args, env, cwd, file=sys.stderr):
-    cwd = shlex.quote(cwd)
-    env_args = " ".join(
-        "%s=%s" % (shlex.quote(k), shlex.quote(v)) for k, v in env.items()
+def _ci():
+    return os.environ.get("CI") is not None
+
+
+def _color():
+    return sys.stdout.isatty() or os.environ.get("GTEST_COLOR", "").lower() == "yes"
+
+
+def _print_header(header, *args):
+    if _ci():
+        # GitHub CI interprets this as a "group header" and will provide buttons to fold/unfold it
+        print("##[group]{}".format(header.format(*args)))
+    elif _color():
+        # Inverse color
+        print("\033[7m{}\033[27m".format(header.format(*args)))
+    else:
+        print("### {}".format(header.format(*args)))
+
+
+def _print_end_header():
+    if _ci():
+        print("##[endgroup]")
+
+
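On CI runners these two helpers bracket each suite's output in a foldable log group; locally the header degrades to inverse video, or a plain "###" prefix when colour is unavailable. A sketch of what a CI run emits (suite name hypothetical):

    _print_header("Running suite for: {}", "known good tests")
    # prints: ##[group]Running suite for: known good tests
    _print_end_header()
    # prints: ##[endgroup]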
+def _print_error(header, *args):
+    if _color():
+        # "!!!" on a red background
+        print("\033[41m!!!\033[0m {}".format(header.format(*args)))
+    else:
+        print("!!! {}".format(header.format(*args)))
+
+
+def _print_format(msg, *args):
+    print(msg.format(*args))
+
+
+def _print_environ(env):
+    _print_header("Environment")
+    for k, v in env.items():
+        _print_format("> {} = {}", k, v)
+    _print_end_header()
+
+
+def _check_filter(cmd, filter):
+    """
+    Checks that the filter matches at least one test for the given cmd
+    """
+    sys.stdout.flush()
+    check = subprocess.Popen(  # nosec B603
+        cmd + ["--gtest_list_tests"],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.DEVNULL,
+        env=(os.environ | {"GTEST_FILTER": filter}),
     )
-    cmd_str = " ".join(map(shlex.quote, cmd_args))
-    print(f"### env -C {cwd} -i {env_args} {cmd_str}", file=file)
+    if not check.stdout.read(1):
+        return False
+    return True


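_check_filter() leans on the fact that GTEST_FILTER also applies to --gtest_list_tests: when nothing matches, the listing is empty, so reading a single byte of stdout distinguishes the two cases without parsing any output. A hypothetical call (binary and pattern illustrative):

    # True if ./test-adapter contains at least one test matching the pattern
    _check_filter(["./test-adapter"], "urQueueCreateTest.*")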
-if __name__ == "__main__":
+def _run_cmd(cmd, comment, filter):
+    _print_header("Running suite for: {}", comment)
+    _print_format("### {}", " ".join(cmd))
+
+    # Check tests are found
+    if not _check_filter(cmd, filter):
+        _print_end_header()
+        _print_error("Could not find any tests with this filter")
+        return 2

-    parser = ArgumentParser()
+    sys.stdout.flush()
+    result = subprocess.call(  # nosec B603
+        cmd,
+        stdout=sys.stdout,
+        stderr=sys.stdout,
+        env=(os.environ | {"GTEST_FILTER": filter}),
+    )
+    _print_end_header()
+    return result
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
     parser.add_argument("--test_command", help="Ctest test case")
-    parser.add_argument("--devices_count", type=str, help="Number of devices on which tests will be run")
-    parser.add_argument("--platforms_count", type=str, help="Number of platforms on which tests will be run")
-    parser.add_argument("--backend", type=str, help="Number of platforms on which tests will be run")
+    parser.add_argument("--failslist", type=str, help="Failure list")
+    parser.add_argument("--", dest="ignored", action="store_true")
+    parser.add_argument("rest", nargs=argparse.REMAINDER)
     args = parser.parse_args()
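The "--" pseudo-option exists so ctest can pass the test binary's own flags through unchanged: argparse stores it into the throwaway `ignored`, and the REMAINDER positional scoops everything after it into `args.rest`, which is appended to the test command below. The intended shape of an invocation is roughly this (script name, paths, and flags all hypothetical):

    # python3 cts_exe.py --test_command ./test-adapter \
    #     --failslist adapter.match -- --backend=level_zero --devices_count=1
    #
    # => base_invocation == ["./test-adapter", "--backend=level_zero", "--devices_count=1"]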
-    invocation = [
-        args.test_command,
-        "--gtest_brief=1",
-        f"--devices_count={args.devices_count}",
-        f"--platforms_count={args.platforms_count}",
-        f"--backend={args.backend}",
-    ]
-    _print_cmdline(invocation, os.environ, os.getcwd())
-
-    result = subprocess.Popen(  # nosec B603
-        invocation, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
-    )

-    pat = re.compile(r'\[( )*FAILED( )*\]')
-    output_list = []
-    test_cases = []
-    for line in result.stdout:
-        output_list.append(line)
-        if pat.search(line):
-            test_case = line.split(" ")[5]
-            test_case = test_case.rstrip(',')
-            test_cases.append(test_case)
-
-    # Every fail has a single corresponding match line but if there are multiple
-    # devices being tested there will be multiple lines with the same failure
-    # message. To avoid matching mismatch, remove lines that differ only by device ID.
-    test_cases = [re.sub(r'ID[0-9]ID', 'X', tc) for tc in test_cases]
-    test_cases = list(OrderedDict.fromkeys(test_cases))
-
-    for tc in test_cases:
-        print(tc)
-
-    rc = result.wait()
-    if rc < 0:
-        print(signal.strsignal(abs(rc)))
-
-    print("#### GTEST_OUTPUT ####", file=sys.stderr)
-    print(''.join(output_list), file=sys.stderr)
-    print("#### GTEST_OUTPUT_END ####", file=sys.stderr)
+    base_invocation = [args.test_command] + args.rest
+
+    if os.environ.get("GTEST_OUTPUT") is not None:
+        # We are being run purely to generate an output file (likely for ctest_parser.py);
+        # fall back to a single test execution
+        sys.exit(
+            subprocess.call(  # nosec B603
+                base_invocation, stdout=sys.stdout, stderr=sys.stderr
+            )
+        )
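googletest honours GTEST_OUTPUT natively and writes the structured report itself; running the suite several times with different filters, as the rest of the script does, would overwrite that report on each run, so this branch keeps it to a single execution and passes the exit code straight through. A hypothetical trigger (path illustrative):

    # e.g. an outer tool such as ctest_parser.py might set:
    #   GTEST_OUTPUT=json:report.json
    # before invoking this wrapper, and gets a single unfiltered run back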
+
+    _print_environ(os.environ)
+
+    # Parse fails list
+    _print_format("Loading fails from {}", args.failslist)
+    fail_patterns = []
+    expected_fail = False
+    with open(args.failslist) as f:
+        for l in f:
+            optional = "{{OPT}}" in l
+            l = l.replace("{{OPT}}", "")
+            l = l.replace("{{.*}}", "*")
+
+            if l.startswith("{{Segmentation fault"):
+                expected_fail = True
+                continue
+            if l.startswith("#"):
+                continue
+            if l.startswith("{{NONDETERMINISTIC}}"):
+                continue
+            if l.strip() == "":
+                continue
+
+            fail_patterns.append(
+                {
+                    "pattern": l.strip(),
+                    "optional": optional,
+                }
+            )
+
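Pulling the parser's rules together: a match file holds one gtest filter pattern per line; "#" starts a comment, "{{OPT}}" anywhere marks the failure as optional (e.g. flaky), "{{.*}}" is a wildcard rewritten to gtest's "*", "{{NONDETERMINISTIC}}" lines are ignored, and a line beginning "{{Segmentation fault" marks the whole suite as expected to fail outright. A hypothetical fails list:

    # adapter.match (entries illustrative)
    urQueueCreateTest.InvalidNullHandleDevice
    {{OPT}}urMemBufferCreateTest.Success/{{.*}}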
+    _print_header("Known failing tests")
+    for fail in fail_patterns:
+        _print_format("> {}", fail)
+    _print_end_header()
+    if len(fail_patterns) == 0:
+        _print_error(
+            "Fail list is empty; if there are no more failures, please remove the file"
+        )
+        sys.exit(2)
+
+    final_result = 0
+
+    # First, run all the known good tests
+    gtest_filter = "-" + (":".join(map(lambda x: x["pattern"], fail_patterns)))
+    if _check_filter(base_invocation, gtest_filter):
+        result = _run_cmd(base_invocation, "known good tests", gtest_filter)
+        if result != 0 and not expected_fail:
+            _print_error("Tests we expected to pass have failed")
+            final_result = result
+    else:
+        _print_format("Note: No tests in this suite are expected to pass")
+
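The exclusion string relies on gtest's filter grammar, where a leading "-" negates and ":" separates patterns, so the known-good pass runs everything except the listed failures. For example (patterns hypothetical):

    # patterns "urQueueCreateTest.*" and "urMemBufferCreateTest.Success/*" yield
    #   gtest_filter == "-urQueueCreateTest.*:urMemBufferCreateTest.Success/*"
    # i.e. run every test except the known failures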
+    # Then run each known failing test
+    for fail in fail_patterns:
+        result = _run_cmd(
+            base_invocation, "failing test {}".format(fail["pattern"]), fail["pattern"]
+        )
+
+        if result == 0 and not fail["optional"]:
+            _print_error(
+                "Test {} is passing when we expect it to fail!", fail["pattern"]
+            )
+            final_result = 1
+
+    sys.exit(final_result)
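Taken together, the wrapper's exit semantics:

    #   suites behave exactly as the fails list predicts    -> exit 0
    #   a known-good test fails (and not expected_fail)     -> exit with the suite's status
    #   a non-{{OPT}} known-bad test passes                 -> exit 1
    #   the fails list contains no patterns                 -> exit 2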