diff --git a/.gitmodules b/.gitmodules index 98c3df68fd..9e228bb85d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -10,3 +10,6 @@ [submodule "hls4ml/templates/catapult/ac_math"] path = hls4ml/templates/catapult/ac_math url = https://github.com/hlslibs/ac_math.git +[submodule "test/pytest/baselines"] + path = test/pytest/baselines + url = https://github.com/marco66colombo/baselines.git diff --git a/test/pytest/baselines b/test/pytest/baselines new file mode 160000 index 0000000000..18144e1379 --- /dev/null +++ b/test/pytest/baselines @@ -0,0 +1 @@ +Subproject commit 18144e13796333a271029ba9dc46b570256d3157 diff --git a/test/pytest/ci-template.yml b/test/pytest/ci-template.yml index 873fe0fec4..4dede94312 100644 --- a/test/pytest/ci-template.yml +++ b/test/pytest/ci-template.yml @@ -9,11 +9,21 @@ - source /opt/intel/oneapi/setvars.sh --force - git config --global --add safe.directory /builds/fastmachinelearning/hls4ml - git submodule update --init --recursive hls4ml/templates/catapult/ + - git submodule update --init --recursive test/pytest/ - if [ $EXAMPLEMODEL == 1 ]; then git submodule update --init example-models; fi - pip install .[testing,sr,optimization] + - export RUN_SYNTHESIS=true + - export TOOL_VERSION=2020.1 + - mkdir -p cmd_vivado_${TOOL_VERSION} + - echo '#!/bin/bash' > cmd_vivado_${TOOL_VERSION}/vivado_hls + - echo "apptainer exec /cvmfs/projects.cern.ch/hls4ml/vivado/2020.1_v1/vivado-2020.1_v1/ vivado_hls \"\$@\"" >> cmd_vivado_${TOOL_VERSION}/vivado_hls + - chmod +x cmd_vivado_${TOOL_VERSION}/vivado_hls + - export PATH=$PWD/cmd_vivado_${TOOL_VERSION}:$PATH + - which vivado_hls + - vivado_hls -version script: - cd test/pytest - - pytest $PYTESTFILE -rA --cov-report xml --cov-report term --cov=hls4ml --junitxml=report.xml --randomly-seed=42 --randomly-dont-reorganize --randomly-dont-reset-seed + - pytest -s $PYTESTFILE -rA --cov-report xml --cov-report term --cov=hls4ml --junitxml=report.xml --randomly-seed=42 --randomly-dont-reorganize --randomly-dont-reset-seed artifacts: when: always reports: @@ -24,3 +34,4 @@ path: test/pytest/coverage.xml paths: - test/pytest/hls4mlprj*.tar.gz + - test/pytest/synthesis_report_*.json diff --git a/test/pytest/conftest.py b/test/pytest/conftest.py new file mode 100644 index 0000000000..752816bbb1 --- /dev/null +++ b/test/pytest/conftest.py @@ -0,0 +1,35 @@ +import os + +import pytest + + +def str_to_bool(val): + return str(val).lower() in ("1", "true") + + +@pytest.fixture(scope="module") +def synthesis_config(): + """ + Fixture that provides synthesis configuration for tests. 
+
+    It gathers:
+    - Whether synthesis should be run (from the RUN_SYNTHESIS env var)
+    - Tool versions for each supported backend (from env vars)
+    - Build arguments specific to each backend toolchain
+
+    """
+    return {
+        "run_synthesis": str_to_bool(os.getenv("RUN_SYNTHESIS", "false")),
+        "tools_version": {
+            "Vivado": os.getenv("VIVADO_VERSION", "2020.1"),
+            "Vitis": os.getenv("VITIS_VERSION", "2020.1"),
+            "Quartus": os.getenv("QUARTUS_VERSION", "latest"),
+            "oneAPI": os.getenv("ONEAPI_VERSION", "2025.0.1"),
+        },
+        "build_args": {
+            "Vivado": {"csim": False, "synth": True, "export": False},
+            "Vitis": {"csim": False, "synth": True, "export": False},
+            "Quartus": {"synth": True, "fpgasynth": False},
+            "oneAPI": {"build_type": "report", "run": False},
+        },
+    }
diff --git a/test/pytest/generate_ci_yaml.py b/test/pytest/generate_ci_yaml.py
index adc3d680ab..9e4914d78e 100644
--- a/test/pytest/generate_ci_yaml.py
+++ b/test/pytest/generate_ci_yaml.py
@@ -1,84 +1,150 @@
-import itertools
-import os
-from pathlib import Path
-
 import yaml

 '''
-Create a Gitlab CI yml file with a separate entry for each test_* file
-in the pytests directory to parallelise the CI jobs.
+Minimal GitLab CI yml generator for testing purposes.
+Generates a single job entry for test_keras_api.py::test_conv1d.
 '''
-
 template = """
-pytest.{}:
+pytest.keras_api_part2:
   extends: .pytest
   variables:
-    PYTESTFILE: {}
-    EXAMPLEMODEL: {}
+    PYTESTFILE: test_keras_api.py::test_conv1d
+    EXAMPLEMODEL: 0
 """
-n_test_files_per_yml = int(os.environ.get('N_TESTS_PER_YAML', 4))
-
-# Blacklisted tests will be skipped
-BLACKLIST = {'test_reduction'}
-
-# Long-running tests will not be bundled with other tests
-LONGLIST = {'test_hgq_layers', 'test_hgq_players', 'test_qkeras', 'test_pytorch_api'}
-
-
-def path_to_name(test_path):
-    path = Path(test_path)
-    name = path.stem.replace('test_', '')
-    return name
-
-
-def batched(iterable, chunk_size):
-    iterator = iter(iterable)
-    while chunk := tuple(itertools.islice(iterator, chunk_size)):
-        yield chunk
-
-
-def uses_example_model(test_filename):
-    with open(test_filename) as f:
-        content = f.read()
-    return 'example-models' in content
-
-
-def generate_test_yaml(test_root='.'):
-    test_root = Path(test_root)
-    test_paths = [path for path in test_root.glob('**/test_*.py') if path.stem not in (BLACKLIST | LONGLIST)]
-    need_example_models = [uses_example_model(path) for path in test_paths]
-
-    idxs = list(range(len(need_example_models)))
-    idxs = sorted(idxs, key=lambda i: f'{need_example_models[i]}_{path_to_name(test_paths[i])}')
-
-    yml = None
-    for batch_idxs in batched(idxs, n_test_files_per_yml):
-        batch_paths: list[Path] = [test_paths[i] for i in batch_idxs]
-        names = [path_to_name(path) for path in batch_paths]
-        name = '+'.join(names)
-        test_files = ' '.join([str(path.relative_to(test_root)) for path in batch_paths])
-        batch_need_example_model = int(any([need_example_models[i] for i in batch_idxs]))
-        diff_yml = yaml.safe_load(template.format(name, test_files, batch_need_example_model))
-        if yml is None:
-            yml = diff_yml
-        else:
-            yml.update(diff_yml)
-
-    test_paths = [path for path in test_root.glob('**/test_*.py') if path.stem in LONGLIST]
-    for path in test_paths:
-        name = path.stem.replace('test_', '')
-        test_file = str(path.relative_to(test_root))
-        needs_examples = uses_example_model(path)
-        diff_yml = yaml.safe_load(template.format(name, test_file, int(needs_examples)))
-        yml.update(diff_yml)
-
-    return yml
+def generate_test_yaml():
+    return yaml.safe_load(template)
 if
__name__ == '__main__': - yml = generate_test_yaml(Path(__file__).parent) + yml = generate_test_yaml() with open('pytests.yml', 'w') as yamlfile: yaml.safe_dump(yml, yamlfile) + +# import ast +# import itertools +# import os +# from pathlib import Path + +# import yaml + +# ''' +# Create a Gitlab CI yml file with a separate entry for each test_* file +# in the pytests directory to parallelise the CI jobs. +# ''' + + +# template = """ +# pytest.{}: +# extends: .pytest +# variables: +# PYTESTFILE: {} +# EXAMPLEMODEL: {} +# """ + + +# n_test_files_per_yml = int(os.environ.get('N_TESTS_PER_YAML', 4)) + +# # Blacklisted tests will be skipped +# BLACKLIST = {'test_reduction'} + +# # Long-running tests will not be bundled with other tests +# LONGLIST = {'test_hgq_layers', 'test_hgq_players', 'test_qkeras', 'test_pytorch_api'} + +# # Test files to split by individual test cases (stem only, no .py) +# # Value = chunk size per CI job +# SPLIT_BY_TEST_CASE = { +# 'test_keras_api': 1, +# } + + +# def collect_test_functions_from_ast(test_file): +# """Collect all test function names using AST parsing (no imports).""" +# with open(test_file, encoding='utf-8') as f: +# tree = ast.parse(f.read(), filename=str(test_file)) + +# test_funcs = [] +# for node in tree.body: +# if isinstance(node, ast.FunctionDef) and node.name.startswith("test"): +# test_funcs.append(f"{test_file}::{node.name}") +# return test_funcs + + +# def batched(iterable, batch_size): +# it = iter(iterable) +# while batch := list(itertools.islice(it, batch_size)): +# yield batch + + +# def path_to_name(test_path): +# path = Path(test_path) +# name = path.stem.replace('test_', '') +# return name + + +# def uses_example_model(test_filename): +# with open(test_filename) as f: +# content = f.read() +# return 'example-models' in content + + +# def generate_test_yaml(test_root='.'): +# test_root = Path(test_root) +# test_paths = [ +# path +# for path in test_root.glob('**/test_*.py') +# if path.stem not in (BLACKLIST | LONGLIST | set(SPLIT_BY_TEST_CASE.keys())) +# ] +# need_example_models = [uses_example_model(path) for path in test_paths] + +# idxs = list(range(len(need_example_models))) +# idxs = sorted(idxs, key=lambda i: f'{need_example_models[i]}_{path_to_name(test_paths[i])}') + +# yml = None +# for batch_idxs in batched(idxs, n_test_files_per_yml): +# batch_paths: list[Path] = [test_paths[i] for i in batch_idxs] +# names = [path_to_name(path) for path in batch_paths] +# name = '+'.join(names) +# test_files = ' '.join([str(path.relative_to(test_root)) for path in batch_paths]) +# batch_need_example_model = int(any([need_example_models[i] for i in batch_idxs])) +# diff_yml = yaml.safe_load(template.format(name, test_files, batch_need_example_model)) +# if yml is None: +# yml = diff_yml +# else: +# yml.update(diff_yml) + +# test_paths = [path for path in test_root.glob('**/test_*.py') if path.stem in LONGLIST] +# for path in test_paths: +# name = path.stem.replace('test_', '') +# test_file = str(path.relative_to(test_root)) +# needs_examples = uses_example_model(path) +# diff_yml = yaml.safe_load(template.format(name, test_file, int(needs_examples))) +# yml.update(diff_yml) + +# test_paths = [path for path in test_root.glob('**/test_*.py') if path.stem in SPLIT_BY_TEST_CASE] +# for path in test_paths: +# stem = path.stem +# name_base = stem.replace('test_', '') +# test_file = str(path.relative_to(test_root)) +# test_ids = collect_test_functions_from_ast(test_file) +# chunk_size = SPLIT_BY_TEST_CASE[stem] +# needs_examples = 
uses_example_model(path)
+
+#     for i, batch in enumerate(batched(test_ids, chunk_size)):
+#         job_name = f"{name_base}_part{i}"
+#         test_file_args = " ".join(batch).strip().replace("\n", " ")
+#         diff_yml = yaml.safe_load(template.format(job_name, test_file_args, int(needs_examples)))
+#         if yml is None:
+#             yml = diff_yml
+#         else:
+#             yml.update(diff_yml)
+
+#     return yml
+
+
+# if __name__ == '__main__':
+#     yml = generate_test_yaml(Path(__file__).parent)
+#     with open('pytests.yml', 'w') as yamlfile:
+#         yaml.safe_dump(yml, yamlfile)
diff --git a/test/pytest/synthesis_helpers.py b/test/pytest/synthesis_helpers.py
new file mode 100644
index 0000000000..280ed63458
--- /dev/null
+++ b/test/pytest/synthesis_helpers.py
@@ -0,0 +1,179 @@
+import json
+from pathlib import Path
+
+import pytest
+
+
+def get_baseline_path(baseline_file_name, backend, version):
+    """
+    Construct the full path to a baseline synthesis report file.
+
+    Args:
+        baseline_file_name (str): The name of the baseline report file.
+        backend (str): The backend used (e.g., 'Vivado', 'Vitis').
+        version (str): The tool version (e.g., '2020.1').
+
+    Returns:
+        Path: A pathlib.Path object pointing to the baseline file location.
+    """
+    return Path(__file__).parent / "baselines" / backend / version / baseline_file_name
+
+
+def save_report(data, filename):
+    """
+    Save synthesis data to a JSON file in the same directory as this script.
+
+    Args:
+        data (dict): The synthesis output data to be saved.
+        filename (str): The filename to write to (e.g., 'synthesis_report_test_x.json').
+
+    Raises:
+        OSError: If the file cannot be written.
+    """
+    out_path = Path(__file__).parent / filename
+    with open(out_path, "w") as fp:
+        json.dump(data, fp, indent=4)
+
+
+def compare_dicts(data, baseline, tolerances):
+    """
+    Compare two flat dictionaries with tolerances.
+
+    Args:
+        data (dict): The generated report dictionary.
+        baseline (dict): The expected/baseline dictionary.
+        tolerances (dict): Dictionary of tolerances per key.
+
+    Raises:
+        AssertionError: If values differ outside the allowed tolerance.
+    """
+    for key, expected in baseline.items():
+        actual = data.get(key)
+        tolerance = tolerances.get(key, 0)
+
+        try:
+            actual = float(actual)
+            expected = float(expected)
+            assert actual == pytest.approx(
+                expected, rel=tolerance
+            ), f"{key}: expected {expected}, got {actual} (tolerance={tolerance*100}%)"
+        except (TypeError, ValueError):
+            # float() raises TypeError for None (missing key) and ValueError for
+            # non-numeric strings; in both cases fall back to exact comparison.
+            assert actual == expected, f"{key}: expected '{expected}', got '{actual}'"
+
+
+def compare_vitis_backend(data, baseline):
+    """
+    Compare reports from Vivado/Vitis backends.
+
+    Args:
+        data (dict): The current synthesis report.
+        baseline (dict): The expected synthesis report.
+    """
+
+    tolerances = {
+        "EstimatedClockPeriod": 0.01,
+        "FF": 0.05,
+        "LUT": 0.10,
+        "BRAM_18K": 0.0,
+        "DSP": 0.0,
+        "URAM": 0.0,
+        "AvailableBRAM_18K": 0.0,
+        "AvailableDSP": 0.0,
+        "AvailableFF": 0.0,
+        "AvailableLUT": 0.0,
+        "AvailableURAM": 0.0,
+    }
+
+    compare_dicts(data["CSynthesisReport"], baseline["CSynthesisReport"], tolerances)
+
+
+def compare_oneapi_backend(data, baseline):
+    """
+    Compare reports from the oneAPI backend.
+
+    Args:
+        data (dict): The current synthesis report.
+        baseline (dict): The expected synthesis report.
+    """
+
+    tolerances = {
+        "HLS": {
+            "total": {"alut": 0.01, "reg": 0.1, "ram": 0.01, "dsp": 0.01, "mlab": 0.01},
+            "available": {"alut": 0.01, "reg": 0.01, "ram": 0.01, "dsp": 0.01, "mlab": 0.01},
+        },
+        "Loop": {"worstFrequency": 0.01, "worstII": 0.01, "worstLatency": 0.01},
+    }
+
+    data = data["report"]
+    baseline = baseline["report"]
+
+    compare_dicts(data["HLS"]["total"], baseline["HLS"]["total"], tolerances["HLS"]["total"])
+    compare_dicts(data["HLS"]["available"], baseline["HLS"]["available"], tolerances["HLS"]["available"])
+    compare_dicts(data["Loop"], baseline["Loop"], tolerances["Loop"])
+
+
+COMPARE_FUNCS = {
+    "Vivado": compare_vitis_backend,
+    "Vitis": compare_vitis_backend,
+    "oneAPI": compare_oneapi_backend,
+}
+
+
+EXPECTED_REPORT_KEYS = {
+    "Vivado": {"CSynthesisReport"},
+    "Vitis": {"CSynthesisReport"},
+    "oneAPI": {"report"},
+}
+
+
+def run_synthesis_test(config, hls_model, baseline_file_name, backend):
+    """
+    Run HLS synthesis and compare the output with a stored baseline report.
+
+    If synthesis is disabled via the configuration (`run_synthesis=False`),
+    no synthesis is executed and the function returns immediately.
+
+    Args:
+        config (dict): Test-wide synthesis configuration fixture.
+        hls_model (object): hls4ml model instance to build and synthesize.
+        baseline_file_name (str): The name of the baseline file for comparison.
+        backend (str): The synthesis backend used (e.g., 'Vivado', 'Vitis').
+    """
+    if not config.get("run_synthesis", False):
+        return
+
+    # Skip Quartus backend
+    if backend == 'Quartus':
+        return
+
+    # Run synthesis
+    build_args = config["build_args"]
+    try:
+        data = hls_model.build(**build_args.get(backend, {}))
+    except Exception as e:
+        pytest.fail(f"hls_model.build failed: {e}")
+
+    # Save synthesis report
+    save_report(data, f"synthesis_report_{baseline_file_name}")
+
+    # Check synthesis report keys
+    expected_keys = EXPECTED_REPORT_KEYS.get(backend, set())
+    assert data and expected_keys.issubset(
+        data.keys()
+    ), f"Synthesis failed: Missing expected keys in synthesis report: expected {expected_keys}, got {set(data.keys())}"
+
+    # Load baseline report
+    version = config["tools_version"].get(backend)
+    baseline_path = get_baseline_path(baseline_file_name, backend, version)
+    try:
+        with open(baseline_path) as fp:
+            baseline = json.load(fp)
+    except FileNotFoundError:
+        pytest.fail(f"Baseline file '{baseline_path}' not found.")
+
+    # Compare report against baseline using backend-specific rules
+    compare_func = COMPARE_FUNCS.get(backend)
+    if compare_func is None:
+        raise AssertionError(f"No comparison function defined for backend: {backend}")
+
+    compare_func(data, baseline)
diff --git a/test/pytest/test_keras_api.py b/test/pytest/test_keras_api.py
index 4bb9f03751..d11ab7e1a6 100644
--- a/test/pytest/test_keras_api.py
+++ b/test/pytest/test_keras_api.py
@@ -4,6 +4,7 @@
 import numpy as np
 import pytest
 import tensorflow as tf
+from synthesis_helpers import run_synthesis_test
 from tensorflow.keras.layers import (
     ELU,
     Activation,
@@ -27,7 +28,7 @@
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI'])
 @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
-def test_dense(backend, io_type):
+def test_dense(backend, io_type, synthesis_config):
     model = tf.keras.models.Sequential()
     model.add(
         Dense(
@@ -53,6 +54,7 @@ def test_dense(backend, io_type):
     config = hls4ml.utils.config_from_keras_model(model)
     output_dir = str(test_root_path / f'hls4mlprj_keras_api_dense_{backend}_{io_type}')
+    baseline_file_name = 
f'hls4mlprj_keras_api_dense_{backend}_{io_type}.json' hls_model = hls4ml.converters.convert_from_keras_model( model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type @@ -74,6 +76,8 @@ def test_dense(backend, io_type): assert list(hls_model.get_layers())[2].attributes['activation'] == str(model.layers[1].activation).split()[1] assert list(hls_model.get_layers())[1].attributes['activation'] == str(model.layers[0].activation).split()[1] + run_synthesis_test(config=synthesis_config, hls_model=hls_model, baseline_file_name=baseline_file_name, backend=backend) + # TODO: add ThresholdedReLU test when it can be made to pass # https://github.com/fastmachinelearning/hls4ml/issues/376 @@ -92,7 +96,7 @@ def test_dense(backend, io_type): # ThresholdedReLU(theta=1.0)]) @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI']) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_activations(activation_function, backend, io_type): +def test_activations(activation_function, backend, io_type, synthesis_config): model = tf.keras.models.Sequential() model.add(Dense(64, input_shape=(1,), name='Dense', kernel_initializer='lecun_uniform', kernel_regularizer=None)) model.add(activation_function) @@ -102,6 +106,8 @@ def test_activations(activation_function, backend, io_type): keras_prediction = model.predict(X_input) config = hls4ml.utils.config_from_keras_model(model) output_dir = str(test_root_path / f'hls4mlprj_keras_api_activations_{activation_function.name}_{backend}_{io_type}') + baseline_file_name = f'hls4mlprj_keras_api_activations_{activation_function.name}_{backend}_{io_type}.json' + hls_model = hls4ml.converters.convert_from_keras_model( model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type ) @@ -114,6 +120,8 @@ def test_activations(activation_function, backend, io_type): assert list(hls_model.get_layers())[2].attributes['class_name'] == activation_function.__class__.__name__ + run_synthesis_test(config=synthesis_config, hls_model=hls_model, baseline_file_name=baseline_file_name, backend=backend) + padds_options = ['same', 'valid'] @@ -131,7 +139,7 @@ def test_activations(activation_function, backend, io_type): ], ) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_conv1d(padds, backend, strategy, io_type): +def test_conv1d(padds, backend, strategy, io_type, synthesis_config): model = tf.keras.models.Sequential() input_shape = (10, 128, 4) model.add( @@ -156,6 +164,8 @@ def test_conv1d(padds, backend, strategy, io_type): config = hls4ml.utils.config_from_keras_model(model) config['Model']['Strategy'] = strategy output_dir = str(test_root_path / f'hls4mlprj_keras_api_conv1d_{padds}_{backend}_{strategy}_{io_type}') + baseline_file_name = f'hls4mlprj_keras_api_conv1d_{padds}_{backend}_{strategy}_{io_type}.json' + hls_model = hls4ml.converters.convert_from_keras_model( model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type ) @@ -196,6 +206,8 @@ def test_conv1d(padds, backend, strategy, io_type): assert list(hls_model.get_layers())[1].attributes['pad_left'] == 0 assert list(hls_model.get_layers())[1].attributes['pad_right'] == 0 + run_synthesis_test(config=synthesis_config, hls_model=hls_model, baseline_file_name=baseline_file_name, backend=backend) + chans_options = ['channels_last'] padds_options = ['same', 'valid'] @@ -215,7 +227,7 @@ def test_conv1d(padds, backend, strategy, io_type): ], ) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) 
-def test_conv2d(chans, padds, backend, strategy, io_type): +def test_conv2d(chans, padds, backend, strategy, io_type, synthesis_config): model = tf.keras.models.Sequential() input_shape = (28, 28, 3) model.add( @@ -238,6 +250,8 @@ def test_conv2d(chans, padds, backend, strategy, io_type): config = hls4ml.utils.config_from_keras_model(model) config['Model']['Strategy'] = strategy output_dir = str(test_root_path / f'hls4mlprj_keras_api_conv2d_{backend}_{strategy}_{chans}_{padds}_{io_type}') + baseline_file_name = f'hls4mlprj_keras_api_conv2d_{backend}_{strategy}_{chans}_{padds}_{io_type}.json' + hls_model = hls4ml.converters.convert_from_keras_model( model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type ) @@ -316,11 +330,13 @@ def test_conv2d(chans, padds, backend, strategy, io_type): assert list(hls_model.get_layers())[1].attributes['pad_left'] == 0 assert list(hls_model.get_layers())[1].attributes['pad_right'] == 0 + run_synthesis_test(config=synthesis_config, hls_model=hls_model, baseline_file_name=baseline_file_name, backend=backend) + # Currently only Vivado and Vitis is supported for io_stream. @pytest.mark.parametrize('backend', ['Vivado', 'Vitis']) @pytest.mark.parametrize('io_type', ['io_stream']) -def test_depthwise2d(backend, io_type): +def test_depthwise2d(backend, io_type, synthesis_config): ''' Test proper handling of DepthwiseConv2D ''' @@ -334,6 +350,8 @@ def test_depthwise2d(backend, io_type): model, granularity='name', default_precision='fixed<32,12>', backend=backend ) output_dir = str(test_root_path / f'hls4mlprj_keras_api_depthwiseconv2d_{backend}_{io_type}') + baseline_file_name = f'hls4mlprj_keras_api_depthwiseconv2d_{backend}_{io_type}.json' + hls_model = hls4ml.converters.convert_from_keras_model( model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type ) @@ -344,11 +362,13 @@ def test_depthwise2d(backend, io_type): np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=1e-2, atol=0.01) + run_synthesis_test(config=synthesis_config, hls_model=hls_model, baseline_file_name=baseline_file_name, backend=backend) + # Currently only Vivado and Vitis is supported for io_stream. @pytest.mark.parametrize('backend', ['Vivado', 'Vitis']) @pytest.mark.parametrize('io_type', ['io_stream']) -def test_depthwise1d(backend, io_type): +def test_depthwise1d(backend, io_type, synthesis_config): ''' Test proper handling of DepthwiseConv1D. 
''' @@ -360,6 +380,8 @@ def test_depthwise1d(backend, io_type): config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend=backend) output_dir = str(test_root_path / f'hls4mlprj_keras_api_depthwiseconv1d_{backend}_{io_type}') + baseline_file_name = f'hls4mlprj_keras_api_depthwiseconv1d_{backend}_{io_type}.json' + hls_model = hls4ml.converters.convert_from_keras_model( model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type ) @@ -370,6 +392,8 @@ def test_depthwise1d(backend, io_type): np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=1e-2, atol=0.01) + run_synthesis_test(config=synthesis_config, hls_model=hls_model, baseline_file_name=baseline_file_name, backend=backend) + pooling_layers = [MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D] @@ -378,7 +402,7 @@ def test_depthwise1d(backend, io_type): @pytest.mark.parametrize('padds', padds_options) @pytest.mark.parametrize('chans', chans_options) @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI']) -def test_pooling(pooling, padds, chans, backend): +def test_pooling(pooling, padds, chans, backend, synthesis_config): assert '1D' in pooling.__name__ or '2D' in pooling.__name__ input_shape = (18, 15, 3) if '2D' in pooling.__name__ else (121, 3) @@ -392,6 +416,10 @@ def test_pooling(pooling, padds, chans, backend): output_dir = str( test_root_path / f'hls4mlprj_keras_api_pooling_{pooling.__name__}_channels_{chans}_padds_{padds}_backend_{backend}' ) + baseline_file_name = ( + f'hls4mlprj_keras_api_pooling_{pooling.__name__}_channels_{chans}_padds_{padds}_backend_{backend}.json' + ) + hls_model = hls4ml.converters.convert_from_keras_model( keras_model, hls_config=hls_cfg, output_dir=output_dir, backend=backend ) @@ -495,3 +523,5 @@ def test_pooling(pooling, padds, chans, backend): assert hls_pool.attributes['n_out'] == out_valid assert hls_pool.attributes['pad_left'] == 0 assert hls_pool.attributes['pad_right'] == 0 + + run_synthesis_test(config=synthesis_config, hls_model=hls_model, baseline_file_name=baseline_file_name, backend=backend)
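
For reference, every hunk in test_keras_api.py above applies the same opt-in pattern: take the module-scoped synthesis_config fixture from conftest.py, name the baseline file after the HLS project directory, and call run_synthesis_test after the functional assertions. Below is a minimal sketch of that pattern for a hypothetical new test; the test name, model, and baseline name are illustrative only and not part of this diff. The call is a no-op unless RUN_SYNTHESIS=true, and a matching JSON report must exist under test/pytest/baselines/<backend>/<tool version>/ for the comparison to pass.

    import pytest
    import tensorflow as tf
    from synthesis_helpers import run_synthesis_test
    from tensorflow.keras.layers import Dense

    import hls4ml


    @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'oneAPI'])
    @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
    def test_my_dense(backend, io_type, synthesis_config):
        # Hypothetical model under test
        model = tf.keras.models.Sequential()
        model.add(Dense(8, input_shape=(4,)))

        config = hls4ml.utils.config_from_keras_model(model)
        output_dir = f'hls4mlprj_my_dense_{backend}_{io_type}'
        # Baseline name mirrors the project directory name
        baseline_file_name = f'hls4mlprj_my_dense_{backend}_{io_type}.json'

        hls_model = hls4ml.converters.convert_from_keras_model(
            model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
        )
        hls_model.compile()
        # ... numerical and attribute assertions on hls_model go here ...

        # No-op unless RUN_SYNTHESIS=true; otherwise builds with the backend's
        # build_args from synthesis_config and asserts the synthesis report
        # against the stored baseline within the per-key tolerances defined
        # in synthesis_helpers.py.
        run_synthesis_test(
            config=synthesis_config, hls_model=hls_model, baseline_file_name=baseline_file_name, backend=backend
        )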