import argparse
import asyncio
import configparser
import pathlib

import h5py
import numpy as np


# argument parsing
parser = argparse.ArgumentParser(description='THOR tester tool')

parser.add_argument("-t",
                    "--testset",
                    action='store',
                    metavar='TESTSET',
                    choices=['fast', 'slow', 'grid'],
                    help='test set to run',
                    default="fast")

args = parser.parse_args()


# TODO: expose these paths as command line arguments
base_output_dir = pathlib.Path('testing')
test_data_dir = pathlib.Path('test_data')

run_set_sel = args.testset

# colors for console output
W = '\033[0m'   # white (normal)
R = '\033[31m'  # red
G = '\033[32m'  # green
O = '\033[33m'  # orange
B = '\033[34m'  # blue
P = '\033[35m'  # purple


# HDF5 helper functions for comparison tests


def h5diff(ref_file, dat_file, epsilon=None):
    """Compare all datasets in ref_file against dat_file.

    Datasets must match exactly, or within an absolute tolerance
    epsilon if one is given. Datasets present only in dat_file are
    ignored. Returns True when all reference datasets match.
    """
    if not ref_file.exists():
        print("No ref file {}".format(ref_file))
        return False

    if not dat_file.exists():
        print("No dat file {}".format(dat_file))
        return False

    comp = True
    with h5py.File(str(ref_file), 'r') as ref, \
         h5py.File(str(dat_file), 'r') as dat:
        for datasetname in ref:
            if datasetname not in dat:
                print("No dataset {} in {}".format(datasetname, dat_file))
                comp = False
            else:
                # load the datasets as numpy arrays
                dat_data = np.array(dat[datasetname])
                ref_data = np.array(ref[datasetname])
                if epsilon is None:
                    # exact comparison
                    if (dat_data != ref_data).any():
                        print("dataset {} mismatch in {}".format(
                            datasetname, dat_file))
                        comp = False
                else:
                    # comparison within absolute tolerance
                    if (np.abs(dat_data - ref_data) > epsilon).any():
                        print("dataset {} mismatch in {} with epsilon {}".format(
                            datasetname, dat_file, epsilon))
                        comp = False

    return comp
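
# Usage sketch (hypothetical paths): comparing one output file against
# its reference within an absolute tolerance of 1e-10 would look like
#   h5diff(pathlib.Path('ref/esp_output_Earth_0.h5'),
#          pathlib.Path('out/esp_output_Earth_0.h5'),
#          epsilon=1e-10)
# compare_h5files below drives h5diff over a whole list of files.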


def compare_h5files(name, output_dir, ref_dir, params=None):
    """Compare the HDF5 files listed in params['comparisons'] between
    ref_dir/name and output_dir/name, within params['epsilon'] if set."""
    comparisons = params['comparisons']
    epsilon = params.get('epsilon')

    equal = True
    for filename in comparisons:
        if not h5diff(ref_dir / name / filename, output_dir / name / filename, epsilon):
            equal = False

    return equal


# dummy test function, used as a placeholder compare_func


def testfunction(name, output_dir, ref_dir, params=None):
    # do some test, return a boolean result
    return True


######################################################################
# test sets definition
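# Each test case is a dict with the keys:
#   name:            test name, also used as the output subdirectory
#   base_ifile:      initial config file the test starts from
#   command_options: extra command line options passed to bin/esp
#   override:        config keys overridden in the base ifile
#   status:          expected return code of the run
#   compare_func:    function used to check the output data (or None)
#   compare_params:  parameters passed to compare_func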
grid_and_startup_set = [
    # standard earth
    # test startup, grid file, planet file, and the first two output files
    {'name': 'earth_hs_grid_4',
     'base_ifile': 'ifile/earth_hstest.thr',
     'command_options': [],
     'override': {'num_steps': '10',
                  'n_out': '10',
                  'glevel': '4',
                  'vlevel': '32'},
     'status': 0,
     'compare_func': compare_h5files,
     'compare_params': {'comparisons': ['esp_output_grid_Earth.h5',
                                        'esp_output_planet_Earth.h5',
                                        'esp_output_Earth_0.h5',
                                        'esp_output_Earth_1.h5']}},
    {'name': 'earth_hs_grid_5',
     'base_ifile': 'ifile/earth_hstest.thr',
     'command_options': [],
     'override': {'num_steps': '10',
                  'n_out': '10',
                  'glevel': '5',
                  'vlevel': '32'},
     'status': 0,
     'compare_func': compare_h5files,
     'compare_params': {'comparisons': ['esp_output_grid_Earth.h5',
                                        'esp_output_planet_Earth.h5',
                                        'esp_output_Earth_0.h5',
                                        'esp_output_Earth_1.h5']}},
    {'name': 'earth_hs_grid_6',
     'base_ifile': 'ifile/earth_hstest.thr',
     'command_options': [],
     'override': {'num_steps': '10',
                  'n_out': '10',
                  'glevel': '6',
                  'vlevel': '32'},
     'status': 0,
     'compare_func': compare_h5files,
     'compare_params': {'comparisons': ['esp_output_grid_Earth.h5',
                                        'esp_output_planet_Earth.h5',
                                        'esp_output_Earth_0.h5',
                                        'esp_output_Earth_1.h5']}},
]

# fast checks, just checking that it did not crash
fast_set = [
    # standard earth
    {'name': 'earth_hs',
     'base_ifile': 'ifile/earth_hstest.thr',
     'command_options': [],
     'override': {'num_steps': '10'},
     'status': 0,
     'compare_func': testfunction,
     'compare_params': {'param': 'novalue'}},

    # wrong config value, should fail
    {'name': 'shouldfail',
     'base_ifile': 'ifile/earth_hstest.thr',
     'command_options': [],
     'override': {'num_steps': '00'},
     'status': 255,
     'compare_func': testfunction,
     'compare_params': {'param': 'novalue'}},

    # does not start from rest
    {'name': 'earth_hs_norest',
     'base_ifile': 'ifile/earth_hstest.thr',
     'command_options': [],
     'override': {'num_steps': '10',
                  'rest': 'false',
                  'initial': 'test_data/esp_initial.h5'},
     'status': 0,
     'compare_func': testfunction,
     'compare_params': {'param': 'novalue'}},

    # deepHJ
    {'name': 'deephj',
     'base_ifile': 'ifile/deephj.thr',
     'command_options': [],
     'override': {'num_steps': '10'},
     'status': 0,
     'compare_func': None,
     'compare_params': None},
    # Earth Sync
    {'name': 'earth_sync',
     'base_ifile': 'ifile/earth_sync.thr',
     'command_options': [],
     'override': {'num_steps': '100'},
     'status': 0,
     'compare_func': None,
     'compare_params': None},
    # ShallowHJ
    {'name': 'shallowhj',
     'base_ifile': 'ifile/shallowhj.thr',
     'command_options': [],
     'override': {'num_steps': '100'},
     'status': 0,
     'compare_func': None,
     'compare_params': None},
    # Planet of the Wasps
    {'name': 'wasp43b_ex',
     'base_ifile': 'ifile/wasp43b_ex.thr',
     'command_options': [],
     'override': {'num_steps': '100'},
     'status': 0,
     'compare_func': None,
     'compare_params': None},
]

# long tests
slow_set = [
    {'name': 'earth_hs',
     'base_ifile': 'ifile/earth_hstest.thr',
     'command_options': [],
     'override': {'num_steps': '10000'},
     'status': 0,
     'compare_func': None,
     'compare_params': None}
]
######################################################################
# the simulation sets we can choose from
simulation_sets = {'slow': slow_set,
                   'fast': fast_set,
                   'grid': grid_and_startup_set}
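# new test sets must be registered here; the dict key is the name
# accepted by the --testset argument above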


run_set = simulation_sets[run_set_sel]

# make output directory
if not base_output_dir.exists():
    base_output_dir.mkdir()
else:
    print("Output directory {} already exists, can't run".format(str(base_output_dir)))
    exit(-1)


# store results output for summary
test_results = {}


def log_result(name, result):
    print(name + ":\t" + result)
    if name in test_results:
        test_results[name].append(result)
    else:
        test_results[name] = [result]


# asynchronous function to run a process, capturing and printing its output


async def run_subprocess(process_args):
    # Create the subprocess; redirect the standard output
    # and standard error into pipes.
    proc = await asyncio.create_subprocess_exec(*process_args,
                                                stdout=asyncio.subprocess.PIPE,
                                                stderr=asyncio.subprocess.PIPE)

    # Echo the output line by line as it arrives, accumulating it
    # for the caller.
    result = ''
    while True:
        line = await proc.stdout.readline()
        if line == b'':
            # readline() returns an empty bytes object at EOF
            break
        print(" " + line.decode('ascii'), end='')
        result += line.decode('ascii')

    # Drain stderr and wait for the subprocess to exit.
    stderr = await proc.stderr.read()
    await proc.wait()
    return result, stderr.decode('ascii'), proc.returncode

# start event loop
loop = asyncio.get_event_loop()

for config_set in run_set:
    print(B+"Running {}".format(config_set['name'])+W)

    # .thr config files have no section header, so prepend a dummy
    # [config] section to make them readable by configparser
    config_parser = configparser.ConfigParser()
    # keep option names case sensitive
    config_parser.optionxform = lambda option: option
    with open(config_set['base_ifile']) as f:
        conf = "[config]\n" + f.read()
    config_parser.read_string(conf)

    # override configs
    for key, value in config_set['override'].items():
        config_parser['config'][key] = value

    output_dir = str(base_output_dir / config_set['name'])
    config_parser['config']['results_path'] = output_dir

    # write out the patched config used for this test,
    # without the dummy section header
    generated_config_name = base_output_dir / (config_set['name'] + ".thr")

    with generated_config_name.open("w") as f:
        for key, value in config_parser['config'].items():
            f.write("{} = {}\n".format(key, value))

    # run test
    command_options = config_set['command_options']

    print("starting bin/esp on {} with options {}".format(
        str(generated_config_name), command_options))
    stdout, stderr, returncode = loop.run_until_complete(
        run_subprocess(['bin/esp', str(generated_config_name)] + command_options))

    # store output somewhere

    # check output status
    if returncode == config_set['status']:
        log_result(config_set['name'], G+"Finished running {}, ended correctly".format(
            config_set['name'])+W)

        # check output data if we have a result evaluation function
        if config_set['compare_func'] is not None:
            compare_func = config_set['compare_func']
            compare_parameters = config_set['compare_params']
            compare_result = compare_func(
                config_set['name'], base_output_dir, test_data_dir, compare_parameters)
            if compare_result:
                log_result(config_set['name'], G+"data check passed"+W)
            else:
                log_result(config_set['name'], R+"data check failed"+W)

    else:
        log_result(config_set['name'], R+"Finished running {}, failed with return code {}".format(
            config_set['name'], returncode)+W)
        log_result(config_set['name'], "return status for {}: {}".format(
            config_set['name'], returncode))
        log_result(config_set['name'], "stdout:\n {}".format(stdout))
        log_result(config_set['name'], "stderr:\n {}".format(stderr))


loop.close()
print(72*"*")
print(72*"*")
print("**** SUMMARY")
print(72*"*")
for name, results in test_results.items():
    for line in results:
        print(name + ":\t" + line)