 SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 """

-from sys import argv
-from subprocess import PIPE, Popen, STDOUT
+from subprocess import Popen, DEVNULL
 import argparse
 import os
+import json

+TMP_RESULTS_FILE = "tmp-results-file.json"
+CTS_TEST_SUITES = ["context", "device", "enqueue", "event", "kernel", "memory",
+                   "platform", "program", "queue", "runtime", "sampler", "usm",
+                   "virtual_memory"]

-def run(command, env):
-    process = Popen(command, env=env, stdout=PIPE,
-                    stderr=STDOUT, cwd=command[1])
-    lines = process.communicate()[0].decode('utf-8').splitlines()
-    results = {"Passed": {}, "Skipped": {}, "Failed": {}, 'Total': 0, 'Success': True}
-    for line in lines:
-        result_types = ['[ OK ]', '[ FAILED ]', '[ SKIPPED ]']
-        if any([x in line for x in result_types]) and line.endswith("ms)"):
-            name, time = line[line.find(']') + 2:].split(' ', maxsplit=1)
-            if 'OK' in line:
-                results['Passed'][name] = {'time': time}
-            elif 'SKIPPED' in line:
-                results['Skipped'][name] = {'time': time}
-            elif 'FAILED' in line:
-                results['Failed'][name] = {'time': time}
-        elif '[==========] Running' in line:
-            # This is the start of a new test suite, get the number of tests in
-            # the first line e.g: '[==========] Running 29 tests from 9 test suites.'
-            total = line[line.find('g') + 2:line.find('t') - 1]
-            results['Total'] += int(total)
-
-    if process.returncode != 0:
-        results['Success'] = False
+def percent(amount, total):
+    return round((amount / total) * 100, 2)

-    return results
-
-
-def print_results(results, result_type):
-    print('[CTest Parser] {} tests: '.format(result_type))
-    print("\n".join("\t{}\t{}".format(k, v['time'])
-          for k, v in results.items()))
-
-
-def print_summary(total, total_passed, total_failed, total_skipped, total_crashed):
-    pass_rate_incl_skipped = str(round((total_passed / total) * 100, 2))
-    total_excl_skipped = total - total_skipped
-    pass_rate_excl_skipped = str(
-        round((total_passed / total_excl_skipped) * 100, 2))
+def summarize_results(results):
+    total = results['Total']
+    total_passed = len(results['Passed'])
+    total_skipped = len(results['Skipped'])
+    total_failed = len(results['Failed'])

-    skipped_rate = str(round((total_skipped / total) * 100, 2))
-    failed_rate = str(round((total_failed / total) * 100, 2))
-    crashed_rate = str(round((total_crashed / total) * 100, 2))
+    pass_rate_incl_skipped = percent(total_passed, total)
+    pass_rate_excl_skipped = percent(total_passed, total - total_skipped)

-    print('[CTest Parser] Results:')
-    print('\tTotal\t[{}]'. format(total))
-    print('\tPassed\t[{}]\t({}%) - ({}% with skipped tests excluded)'.format(
-        total_passed, pass_rate_incl_skipped, pass_rate_excl_skipped))
-    print('\tSkipped\t[{}]\t({}%)'.format(total_skipped, skipped_rate))
-    print('\tFailed\t[{}]\t({}%)'.format(total_failed, failed_rate))
-    print('\tCrashed\t[{}]\t({}%)'.format(total_crashed, crashed_rate))
+    skipped_rate = percent(total_skipped, total)
+    failed_rate = percent(total_failed, total)

+    ljust_param = len(str(total))
+
+    print(
+f"""[CTest Parser] Results:
+    Total    [{str(total).ljust(ljust_param)}]
+    Passed   [{str(total_passed).ljust(ljust_param)}]  ({pass_rate_incl_skipped}%) - ({pass_rate_excl_skipped}% with skipped tests excluded)
+    Skipped  [{str(total_skipped).ljust(ljust_param)}]  ({skipped_rate}%)
+    Failed   [{str(total_failed).ljust(ljust_param)}]  ({failed_rate}%)
+"""
+    )
+
+def parse_results(results):
+    parsed_results = {"Passed": {}, "Skipped":{}, "Failed": {}, 'Total':0, 'Success':True}
+    for _, result in results.items():
+        if result is None:
+            parsed_results['Success'] = False
+            continue
+
+        parsed_results['Total'] += result['tests']
+        for testsuite in result.get('testsuites'):
+            for test in testsuite.get('testsuite'):
+                test_name = f"{testsuite['name']}.{test['name']}"
+                test_time = test['time']
+                if 'failures' in test:
+                    parsed_results['Failed'][test_name] = {'time': test_time}
+                elif test['result'] == 'SKIPPED':
+                    parsed_results['Skipped'][test_name] = {'time': test_time}
+                else:
+                    parsed_results['Passed'][test_name] = {'time': test_time}
+    return parsed_results
+
+
+def run(args):
+    results = {}
+
+    tmp_results_file = f"{args.ctest_path}/{TMP_RESULTS_FILE}"
+    env = os.environ.copy()
+    env['GTEST_OUTPUT'] = f"json:{tmp_results_file}"
+
+    for suite in CTS_TEST_SUITES:
+        ctest_path = f"{args.ctest_path}/test/conformance/{suite}"
+        process = Popen(['ctest', ctest_path], env=env, cwd=ctest_path,
+                        stdout=DEVNULL if args.quiet else None,
+                        stderr=DEVNULL if args.quiet else None)
+        process.wait()
+
+        try:
+            with open(tmp_results_file, 'r') as results_file:
+                json_data = json.load(results_file)
+                results[suite] = json_data
+            os.remove(tmp_results_file)
+        except FileNotFoundError:
+            results[suite] = None
+            print('\033[91m' + f"Conformance test suite '{suite}' : likely crashed!" + '\033[0m')
+
+    return results

 def dir_path(string):
     if os.path.isdir(string):
-        return string
+        return os.path.abspath(string)
     else:
         raise NotADirectoryError(string)

-
 def main():
-    parser = argparse.ArgumentParser(
-        description='CTest Result Parser. Parses output from CTest and '
-        'summarises test results. -VV argument is always passed to '
-        'CTest capture full output.')
-
+    parser = argparse.ArgumentParser()
     parser.add_argument('ctest_path', type=dir_path, nargs='?', default='.',
                         help='Optional path to test directory containing '
                         'CTestTestfile. Defaults to current directory.')
     parser.add_argument('-q', '--quiet', action='store_true',
                         help='Output only failed tests.')
-
     args = parser.parse_args()

-    path = args.ctest_path
-    command = ['ctest', path, '-VV']
-
-    env = os.environ.copy()
-    env['GTEST_COLOR'] = 'no'
-    env['CTEST_OUTPUT_ON_FAILURE'] = '0'
-    env['GTEST_BRIEF'] = '0'
-    env['GTEST_PRINT_TIME'] = '1'
-    env['GTEST_PRINT_UTF8'] = '1'
-
-    results = run(command, env)
-
-    total = results['Total']
-    total_passed = len(results['Passed'])
-    total_skipped = len(results['Skipped'])
-    total_failed = len(results['Failed'])
-    total_crashed = total - (total_passed + total_skipped + total_failed)
-
-    if total > 0:
-        print("[CTest Parser] Preparing results...")
-        if args.quiet == False:
-            if total_passed > 0:
-                print_results(results['Passed'], 'Passed')
-            if total_skipped > 0:
-                print_results(results['Skipped'], 'Skipped')
-            if total_failed > 0:
-                print_results(results['Failed'], 'Failed')
-
-        print_summary(total, total_passed, total_failed,
-                      total_skipped, total_crashed)
-        if results['Success'] == False:
-            exit(1)
-    else:
-        print("[CTest Parser] Error: no tests were run")
-
+    raw_results = run(args)
+    parsed_results = parse_results(raw_results)
+    summarize_results(parsed_results)

 if __name__ == '__main__':
     try:
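
Editor's note: the rewritten script no longer scrapes ctest console output; it asks GoogleTest for a JSON report via the GTEST_OUTPUT environment variable (which ctest passes through to the test binaries it launches) and walks that report in parse_results(). For reference, the sketch below shows the rough shape of such a report, limited to the keys the new code actually reads ('tests', 'testsuites', 'testsuite', 'name', 'time', 'result', 'failures'). The suite and test names, the timing values, and the 'COMPLETED' result string are illustrative assumptions, not taken from this change; a real report contains additional fields.

    # Minimal sketch of a GTEST_OUTPUT=json:<path> report, with the same
    # categorization pass parse_results() applies reproduced inline.
    sample_report = {
        "tests": 3,  # total test count for this suite binary
        "testsuites": [
            {
                "name": "urContextCreateTest",          # hypothetical suite name
                "testsuite": [
                    # Passing test: no 'failures' key, result not 'SKIPPED'.
                    {"name": "Success", "result": "COMPLETED", "time": "0.001s"},
                    # Skipped test: 'result' is 'SKIPPED'.
                    {"name": "UnsupportedFlag", "result": "SKIPPED", "time": "0s"},
                    # Failing test: GoogleTest adds a 'failures' list to the entry.
                    {"name": "InvalidNullPointer", "result": "COMPLETED",
                     "time": "0.002s", "failures": [{"failure": "..."}]},
                ],
            }
        ],
    }

    passed, skipped, failed = {}, {}, {}
    for testsuite in sample_report["testsuites"]:
        for test in testsuite["testsuite"]:
            name = f"{testsuite['name']}.{test['name']}"
            if "failures" in test:
                failed[name] = {"time": test["time"]}
            elif test["result"] == "SKIPPED":
                skipped[name] = {"time": test["time"]}
            else:
                passed[name] = {"time": test["time"]}

    print(len(passed), len(skipped), len(failed))  # -> 1 1 1

Because each suite in CTS_TEST_SUITES is run as its own ctest invocation, run() collects one such report per suite (or None when the report file is missing, which the script treats as a likely crash).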
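As a quick sanity check of the summary arithmetic, the sketch below reproduces percent() and the two pass rates summarize_results() reports (with and without skipped tests); the tallies are invented for illustration.

    # Sketch of the summary math; counts are made up.
    def percent(amount, total):
        return round((amount / total) * 100, 2)

    total, passed, skipped, failed = 200, 150, 40, 10

    print(percent(passed, total))            # 75.0  -> pass rate including skipped tests
    print(percent(passed, total - skipped))  # 93.75 -> pass rate with skipped tests excluded
    print(percent(skipped, total))           # 20.0
    print(percent(failed, total))            # 5.0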