#!/usr/bin/env python
"""
Copyright (C) 2022 Intel Corporation

Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
See LICENSE.TXT
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""
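
# Runs each Unified Runtime conformance test suite under ctest, collects the
# GoogleTest JSON report every suite writes, and prints a summary of passed,
# skipped, failed, and crashed tests.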

from subprocess import Popen, DEVNULL, PIPE
import argparse
import json
import os
import sys

TMP_RESULTS_FILE = "tmp-results-file.json"

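# Ask ctest which tests are registered without running them;
# --show-only=json-v1 prints a machine-readable listing on stdout.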
def get_cts_test_suite_names(working_directory):
    process = Popen(["ctest", "--show-only=json-v1"], cwd=working_directory,
                    stdout=PIPE, env=os.environ.copy())
    out, _ = process.communicate()
    testsuites = json.loads(out)
    return [test['name'] for test in testsuites['tests']]

def percent(amount, total):
    return round((amount / total) * 100, 2)

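# Print a human-readable summary. Crashed tests are inferred: they are the
# gap between the expected total and the passed/skipped/failed counts.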
def summarize_results(results):
    total = results['Total']
    total_passed = len(results['Passed'])
    total_skipped = len(results['Skipped'])
    total_failed = len(results['Failed'])
    total_crashed = total - (total_passed + total_skipped + total_failed)

    pass_rate_incl_skipped = percent(total_passed, total)
    pass_rate_excl_skipped = percent(total_passed, total - total_skipped)

    skipped_rate = percent(total_skipped, total)
    failed_rate = percent(total_failed, total)
    crash_rate = percent(total_crashed, total)

    ljust_param = len(str(total))

    print(
f"""[CTest Parser] Results:
    Total    [{str(total).ljust(ljust_param)}]
    Passed   [{str(total_passed).ljust(ljust_param)}] ({pass_rate_incl_skipped}%) - ({pass_rate_excl_skipped}% excluding skipped tests)
    Skipped  [{str(total_skipped).ljust(ljust_param)}] ({skipped_rate}%)
    Failed   [{str(total_failed).ljust(ljust_param)}] ({failed_rate}%)
    Crashed  [{str(total_crashed).ljust(ljust_param)}] ({crash_rate}%)
"""
    )

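# Fold the per-suite GoogleTest reports into one dict keyed by outcome. A
# suite with no 'actual' report is presumed to have crashed, so its expected
# test count still contributes to the total.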
def parse_results(results):
    parsed_results = {'Passed': {}, 'Skipped': {}, 'Failed': {},
                      'Total': 0, 'Success': True}
    for _, result in results.items():
        if result['actual'] is None:
            parsed_results['Success'] = False
            parsed_results['Total'] += result['expected']['tests']
            continue

        parsed_results['Total'] += result['actual']['tests']
        for testsuite in result['actual'].get('testsuites', []):
            for test in testsuite.get('testsuite', []):
                test_name = f"{testsuite['name']}.{test['name']}"
                test_time = test['time']
                if 'failures' in test:
                    parsed_results['Failed'][test_name] = {'time': test_time}
                elif test['result'] == 'SKIPPED':
                    parsed_results['Skipped'][test_name] = {'time': test_time}
                else:
                    parsed_results['Passed'][test_name] = {'time': test_time}
    return parsed_results

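# Drive every conformance suite twice: first with --gtest_list_tests to
# capture the expected test count, then for real under ctest. GTEST_OUTPUT
# makes GoogleTest write a JSON report to tmp_results_file, which is read
# back and removed after each step.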
def run(args):
    results = {}

    tmp_results_file = f"{args.ctest_path}/{TMP_RESULTS_FILE}"
    env = os.environ.copy()
    env['GTEST_OUTPUT'] = f"json:{tmp_results_file}"

    test_suite_names = get_cts_test_suite_names(f"{args.ctest_path}/test/conformance/")

    # List the available tests in each suite to establish the expected totals.
    for suite in test_suite_names:
        results[suite] = {}
        test_executable = f"{args.ctest_path}/bin/test-{suite}"
        process = Popen([test_executable, "--gtest_list_tests"], env=env,
                        stdout=DEVNULL if args.quiet else None,
                        stderr=DEVNULL if args.quiet else None)
        process.wait()
        try:
            with open(tmp_results_file, 'r') as test_list:
                all_tests = json.load(test_list)
                results[suite]['expected'] = all_tests
            os.remove(tmp_results_file)
        except FileNotFoundError:
            print(f"Could not discover tests for {suite}")
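
    # Now run each suite for real; if no results file appears afterwards, the
    # test binary most likely crashed before GoogleTest could write its report.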
    for suite in test_suite_names:
        ctest_path = f"{args.ctest_path}/test/conformance/{suite}"
        process = Popen(['ctest', ctest_path], env=env, cwd=ctest_path,
                        stdout=DEVNULL if args.quiet else None,
                        stderr=DEVNULL if args.quiet else None)
        process.wait()

        try:
            with open(tmp_results_file, 'r') as results_file:
                json_data = json.load(results_file)
                results[suite]['actual'] = json_data
            os.remove(tmp_results_file)
        except FileNotFoundError:
            results[suite]['actual'] = None
            print('\033[91m' + f"Conformance test suite '{suite}': likely crashed!" + '\033[0m')

    return results

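# argparse 'type' callback: accept only an existing directory and normalize
# it to an absolute path.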
def dir_path(string):
    if os.path.isdir(string):
        return os.path.abspath(string)
    else:
        raise NotADirectoryError(string)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('ctest_path', type=dir_path, nargs='?', default='.',
                        help='Optional path to test directory containing '
                             'CTestTestfile. Defaults to current directory.')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Output only failed tests.')
    args = parser.parse_args()

    raw_results = run(args)
    parsed_results = parse_results(raw_results)
    summarize_results(parsed_results)

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        sys.exit(130)  # 128 + SIGINT(2): conventional exit code for Ctrl-C.