
Commit ac8d9fd

Merge branch 'main' into hw_opt_p2
2 parents: ce8431d + bd69272


51 files changed: +705, -452 lines

docs/api/configuration.rst

Lines changed: 35 additions & 3 deletions
@@ -9,15 +9,18 @@ We currently support two ways of setting hls4ml's model configuration. This page

 .. contents:: \

+The Python API approach is recommended for most users as there are more utilities to help create the configuration dictionaries.

 **NOTE:**


 *
   One important part of ``hls4ml`` to remember is that the user is responsible for the format of the inputs. There is no automatic formatting or normalization so this must be done in the training.

-*
+..
+  *
    For developers, you might also want to checkout this section: `Detailed configuration in converted hls codes <#detailed-configuration-in-converted-hls-codes>`_.
+   *Broken link*

 ----


@@ -31,11 +34,26 @@ Using hls4ml, you can quickly generate a simple configuration dictionary from a

    import hls4ml
    config = hls4ml.utils.config_from_keras_model(model, granularity='model')

-For more advanced and detailed configuration, you can also set them through the created dictionary. For example, to change the reuse factor:
+This Python dictionary can be edited as needed. A more advanced configuration can be generated with, for example:
+
+.. code-block:: python
+
+   import hls4ml
+   config = hls4ml.utils.config_from_keras_model(
+       model,
+       granularity='name',
+       default_precision='fixed<16,6>',
+       backend='Vitis')
+
+This will include per-layer configuration based on the model. Including the backend is recommended because some configuration options depend on the backend. Note that the precisions at the
+higher granularities usually default to 'auto', which means that ``hls4ml`` will try to set them automatically. Note that higher-granularity settings take precedence
+over model-level settings. See :py:class:`~hls4ml.utils.config.config_from_keras_model` for more information on the various options.
+
+One can override specific values before using the configuration:

 .. code-block:: python

-   config['Model']['ReuseFactor'] = 2
+   config['LayerName']['fc1']['ReuseFactor'] = 2

 Or to set the precision of a specific layer's weight:


@@ -45,6 +63,20 @@ Or to set the precision of a specific layer's weight:

 To better understand how the configuration hierachy works, refer to the next section for more details.

+Finally, one then uses the configuration to create an hls model:
+
+.. code-block:: python
+
+   hls_model = hls4ml.converters.convert_from_keras_model(
+       model,
+       hls_config=config,
+       output_dir="my_project_dir",
+       io_type='io_stream',
+       backend='Vitis'
+   )
+
+See :py:class:`~hls4ml.converters.convert_from_keras_model` for more information on the various options.
+
 ----

 2. YAML Configuration file
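
Taken together, the documentation changes above describe a name-granularity workflow. A minimal, self-contained sketch of that flow (the toy Keras model and the layer name 'fc1' are illustrative placeholders, not part of the commit):

    import hls4ml
    from tensorflow.keras.layers import Dense, Input
    from tensorflow.keras.models import Sequential

    # Toy model; any Keras model with named layers works the same way.
    model = Sequential([
        Input(shape=(8,)),
        Dense(16, activation='relu', name='fc1'),
        Dense(1, activation='sigmoid', name='output'),
    ])

    # Per-layer ('name' granularity) configuration; passing the backend lets
    # backend-dependent options be filled in, and per-layer precisions default to 'auto'.
    config = hls4ml.utils.config_from_keras_model(
        model, granularity='name', default_precision='fixed<16,6>', backend='Vitis')

    # Override a specific value before converting, as in the docs.
    config['LayerName']['fc1']['ReuseFactor'] = 2

    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir='my_project_dir',
        io_type='io_stream', backend='Vitis')
    hls_model.compile()  # builds the C simulation library; hls_model.build() would run HLS synthesis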

docs/setup.rst

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ To run FPGA synthesis, installation of following tools is required:

 * Xilinx Vivado HLS 2018.2 to 2020.1 for synthesis for Xilinx FPGAs

-* Vitis HLS 2022.1 or newer is required for synthesis for Xilinx FPGAs using the experimental ``Vitis`` backend.
+* Vitis HLS 2022.2 or newer is required for synthesis for Xilinx FPGAs using the ``Vitis`` backend.

 * Intel Quartus 20.1 to 21.4 for the synthesis for Intel FPGAs

docs/status.rst

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ Other feature notes:
 * ``hls4ml`` is tested on Linux, and supports
   * Vivado HLS versions 2018.2 to 2020.1
   * Intel HLS versions 20.1 to 21.4
-  * Vitis HLS versions 2020.2 to 2022.2 (experimentally)
+  * Vitis HLS versions 2022.2 to 2024.1
 * Windows and macOS are not supported
 * BDT support has moved to the `Conifer <https://github.com/thesps/conifer>`__ package

hls4ml/backends/catapult/catapult_backend.py

Lines changed: 6 additions & 5 deletions
@@ -1,5 +1,6 @@
 import os
 import sys
+from warnings import warn

 import numpy as np

@@ -110,6 +111,8 @@ def _register_flows(self):
             'catapult:inplace_stream_flatten',
             'catapult:skip_softmax',
             'catapult:fix_softmax_table_size',
+            'catapult:process_fixed_point_quantizer_layer',
+            'infer_precision_types',
         ]
         optimization_flow = register_flow('optimize', optimization_passes, requires=[init_flow], backend=self.name)

@@ -119,6 +122,7 @@ def _register_flows(self):
             'catapult:generate_conv_streaming_instructions',
             'catapult:apply_resource_strategy',
             'catapult:generate_conv_im2col',
+            'catapult:apply_winograd_kernel_transformation',
         ]
         catapult_types_flow = register_flow('specific_types', catapult_types, requires=[init_flow], backend=self.name)

@@ -152,9 +156,8 @@ def _register_flows(self):
         ]

         if len(extras) > 0:
-            extras_flow = register_flow('extras', extras, requires=[init_flow], backend=self.name)
-        else:
-            extras_flow = None
+            for opt in extras:
+                warn(f'WARNING: Optimizer "{opt}" is not part of any flow and will not be executed.')

         ip_flow_requirements = [
             'optimize',
@@ -163,10 +166,8 @@ def _register_flows(self):
             quantization_flow,
             optimization_flow,
             catapult_types_flow,
-            extras_flow,
             template_flow,
         ]
-        ip_flow_requirements = list(filter(None, ip_flow_requirements))

         self._default_flow = register_flow('ip', None, requires=ip_flow_requirements, backend=self.name)
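
The backend diffs in this commit replace the catch-all 'extras' flow with a per-optimizer warning. A minimal standalone sketch of that pattern (the pass names below are hypothetical, chosen only to illustrate the set difference):

    from warnings import warn

    # All optimizer passes registered for the backend (hypothetical names).
    registered_passes = {
        'catapult:inplace_stream_flatten',
        'catapult:skip_softmax',
        'catapult:my_unattached_pass',
    }

    # Passes already claimed by the named flows (optimize, specific_types, ...).
    passes_in_flows = {
        'catapult:inplace_stream_flatten',
        'catapult:skip_softmax',
    }

    # Previously the leftovers were bundled into an 'extras' flow that the default
    # 'ip' flow depended on; now each leftover just produces a warning and is skipped.
    extras = [p for p in registered_passes if p not in passes_in_flows]
    for opt in extras:
        warn(f'WARNING: Optimizer "{opt}" is not part of any flow and will not be executed.')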

hls4ml/backends/catapult/passes/conv_same_pad.py

Lines changed: 7 additions & 11 deletions
@@ -6,10 +6,8 @@ class InsertZeroPaddingBeforeConv1D(OptimizerPass):
     name = 'insert_zero_padding_before_conv1d'

     def match(self, node):
-        is_match = (
-            isinstance(node, (Conv1D, SeparableConv1D))
-            and ((node.get_attr('padding') == 'same') or (node.get_attr('padding') == 'causal'))
-            and node.get_attr('filt_width') != 1
+        is_match = isinstance(node, (Conv1D, SeparableConv1D)) and (
+            (node.get_attr('pad_left') != 0) or (node.get_attr('pad_right') != 0)
         )
         return is_match

@@ -37,7 +35,6 @@ def transform(self, model, node):
         }

         # Switch Conv1D layer padding to 'valid'
-        node.set_attr('padding', 'valid')
         node.set_attr('pad_left', 0)
         node.set_attr('pad_right', 0)
         node.set_attr('in_width', out_width)
@@ -54,11 +51,11 @@ class InsertZeroPaddingBeforeConv2D(OptimizerPass):
     name = 'insert_zero_padding_before_conv2d'

     def match(self, node):
-        is_match = (
-            isinstance(node, (Conv2D, SeparableConv2D))
-            and node.get_attr('padding') == 'same'
-            and node.get_attr('filt_height') != 1
-            and node.get_attr('filt_width') != 1
+        is_match = isinstance(node, (Conv2D, SeparableConv2D)) and (
+            (node.get_attr('pad_left') != 0)
+            or (node.get_attr('pad_right') != 0)
+            or (node.get_attr('pad_top') != 0)
+            or (node.get_attr('pad_bottom') != 0)
         )
         return is_match

@@ -93,7 +90,6 @@ def transform(self, model, node):
         }

         # Switch Conv2D layer padding to 'valid'
-        node.set_attr('padding', 'valid')
         node.set_attr('pad_top', 0)
         node.set_attr('pad_bottom', 0)
         node.set_attr('pad_left', 0)
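
The rewritten match conditions key off the computed pad attributes instead of the Keras padding string. A small sketch of why the two formulations agree in the common cases, using the parser utility (assumed here to be importable from hls4ml.converters.utils; the dimensions are illustrative):

    from hls4ml.converters.utils import compute_padding_1d

    # 'same' padding with a width-3 kernel needs one element of padding per side,
    # so pad_left/pad_right are nonzero and the pass still matches.
    out_w, pad_left, pad_right = compute_padding_1d('same', 32, 1, 3)
    print(out_w, pad_left, pad_right)  # expected: 32 1 1

    # A width-1 kernel needs no padding, so the pass is skipped, which is the case
    # the old explicit filt_width != 1 check encoded.
    out_w, pad_left, pad_right = compute_padding_1d('same', 32, 1, 1)
    print(out_w, pad_left, pad_right)  # expected: 32 0 0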

hls4ml/backends/quartus/quartus_backend.py

Lines changed: 3 additions & 5 deletions
@@ -55,6 +55,7 @@ def _register_flows(self):
             'quartus:transform_types',
             'quartus:register_bram_weights',
             'quartus:apply_resource_strategy',
+            'quartus:generate_conv_im2col',
             'quartus:apply_winograd_kernel_transformation',
         ]
         quartus_types_flow = register_flow('specific_types', quartus_types, requires=[init_flow], backend=self.name)
@@ -103,9 +104,8 @@ def _register_flows(self):
         ]

         if len(extras) > 0:
-            extras_flow = register_flow('extras', extras, requires=[init_flow], backend=self.name)
-        else:
-            extras_flow = None
+            for opt in extras:
+                warn(f'WARNING: Optimizer "{opt}" is not part of any flow and will not be executed.')

         ip_flow_requirements = [
             'optimize',
@@ -114,10 +114,8 @@ def _register_flows(self):
             quantization_flow,
             optimization_flow,
             quartus_types_flow,
-            extras_flow,
             template_flow,
         ]
-        ip_flow_requirements = list(filter(None, ip_flow_requirements))

         self._default_flow = register_flow('ip', None, requires=ip_flow_requirements, backend=self.name)

hls4ml/backends/vivado/passes/conv_same_pad.py

Lines changed: 7 additions & 11 deletions
@@ -6,10 +6,8 @@ class InsertZeroPaddingBeforeConv1D(OptimizerPass):
     name = 'insert_zero_padding_before_conv1d'

     def match(self, node):
-        is_match = (
-            isinstance(node, (Conv1D, SeparableConv1D))
-            and ((node.get_attr('padding') == 'same') or (node.get_attr('padding') == 'causal'))
-            and node.get_attr('filt_width') != 1
+        is_match = isinstance(node, (Conv1D, SeparableConv1D)) and (
+            (node.get_attr('pad_left') != 0) or (node.get_attr('pad_right') != 0)
         )
         return is_match

@@ -37,7 +35,6 @@ def transform(self, model, node):
         }

         # Switch Conv1D layer padding to 'valid'
-        node.set_attr('padding', 'valid')
         node.set_attr('pad_left', 0)
         node.set_attr('pad_right', 0)
         node.set_attr('in_width', out_width)
@@ -54,11 +51,11 @@ class InsertZeroPaddingBeforeConv2D(OptimizerPass):
     name = 'insert_zero_padding_before_conv2d'

     def match(self, node):
-        is_match = (
-            isinstance(node, (Conv2D, SeparableConv2D))
-            and node.get_attr('padding') == 'same'
-            and node.get_attr('filt_height') != 1
-            and node.get_attr('filt_width') != 1
+        is_match = isinstance(node, (Conv2D, SeparableConv2D)) and (
+            (node.get_attr('pad_left') != 0)
+            or (node.get_attr('pad_right') != 0)
+            or (node.get_attr('pad_top') != 0)
+            or (node.get_attr('pad_bottom') != 0)
         )
         return is_match

@@ -93,7 +90,6 @@ def transform(self, model, node):
         }

         # Switch Conv2D layer padding to 'valid'
-        node.set_attr('padding', 'valid')
         node.set_attr('pad_top', 0)
         node.set_attr('pad_bottom', 0)
         node.set_attr('pad_left', 0)

hls4ml/backends/vivado/vivado_backend.py

Lines changed: 2 additions & 5 deletions
@@ -148,9 +148,8 @@ def _register_flows(self):
         ]

         if len(extras) > 0:
-            extras_flow = register_flow('extras', extras, requires=[init_flow], backend=self.name)
-        else:
-            extras_flow = None
+            for opt in extras:
+                warn(f'WARNING: Optimizer "{opt}" is not part of any flow and will not be executed.')

         ip_flow_requirements = [
             'optimize',
@@ -159,10 +158,8 @@ def _register_flows(self):
             quantization_flow,
             optimization_flow,
             vivado_types_flow,
-            extras_flow,
             template_flow,
         ]
-        ip_flow_requirements = list(filter(None, ip_flow_requirements))

         self._default_flow = register_flow('ip', None, requires=ip_flow_requirements, backend=self.name)

hls4ml/converters/__init__.py

Lines changed: 4 additions & 5 deletions
@@ -10,6 +10,8 @@
 from hls4ml.converters.keras_to_hls import get_supported_keras_layers  # noqa: F401
 from hls4ml.converters.keras_to_hls import parse_keras_model  # noqa: F401
 from hls4ml.converters.keras_to_hls import keras_to_hls, register_keras_layer_handler
+
+# from hls4ml.converters.pytorch_to_hls import parse_pytorch_model  # noqa: F401
 from hls4ml.model import ModelGraph
 from hls4ml.utils.config import create_config
 from hls4ml.utils.symbolic_utils import LUTFunction
@@ -238,7 +240,6 @@ def convert_from_keras_model(

 def convert_from_pytorch_model(
     model,
-    input_shape,
     output_dir='my-hls-test',
     project_name='myproject',
     input_data_tb=None,
@@ -251,7 +252,6 @@ def convert_from_pytorch_model(

     Args:
         model: PyTorch model to convert.
-        input_shape (list): The shape of the input tensor. First element is the batch size, needs to be None
         output_dir (str, optional): Output directory of the generated HLS project. Defaults to 'my-hls-test'.
         project_name (str, optional): Name of the HLS project. Defaults to 'myproject'.
         input_data_tb (str, optional): String representing the path of input data in .npy or .dat format that will be
@@ -293,17 +293,16 @@ def convert_from_pytorch_model(
     config = create_config(output_dir=output_dir, project_name=project_name, backend=backend, **kwargs)

     config['PytorchModel'] = model
-    config['InputShape'] = input_shape
     config['InputData'] = input_data_tb
     config['OutputPredictions'] = output_data_tb
     config['HLSConfig'] = {}

     if hls_config is None:
         hls_config = {}

-    model_config = hls_config.get('Model', None)
+    model_config = hls_config.get('Model')
     config['HLSConfig']['Model'] = _check_model_config(model_config)
-
+    config['InputShape'] = hls_config.get('InputShape')
     _check_hls_config(config, hls_config)

     return pytorch_to_hls(config)
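
With input_shape removed from the signature, the shape is now read from hls_config (see config['InputShape'] = hls_config.get('InputShape') above). A hedged sketch of a call under the new interface (the torch model, the shape, and the 'Model' settings are placeholders):

    import hls4ml
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))  # placeholder model

    hls_config = {
        'Model': {'Precision': 'fixed<16,6>', 'ReuseFactor': 1},
        'InputShape': [None, 8],  # batch dimension first and set to None, as the old docstring required
    }

    hls_model = hls4ml.converters.convert_from_pytorch_model(
        model,
        hls_config=hls_config,
        output_dir='my-hls-test',
        backend='Vitis',
    )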

hls4ml/converters/keras/convolution.py

Lines changed: 2 additions & 4 deletions
@@ -30,10 +30,9 @@ def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader):
         layer['n_filt'] = layer['n_chan'] * layer.get('depth_multiplier')
     layer['filt_width'] = keras_layer['config']['kernel_size'][0]
     layer['stride_width'] = keras_layer['config']['strides'][0]
-    layer['padding'] = keras_layer['config']['padding']

     (layer['out_width'], layer['pad_left'], layer['pad_right']) = compute_padding_1d(
-        layer['padding'], layer['in_width'], layer['stride_width'], layer['filt_width']
+        keras_layer['config']['padding'], layer['in_width'], layer['stride_width'], layer['filt_width']
     )

     if layer['data_format'] == 'channels_last':
@@ -74,7 +73,6 @@ def parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader):
     layer['filt_width'] = keras_layer['config']['kernel_size'][1]
     layer['stride_height'] = keras_layer['config']['strides'][0]
     layer['stride_width'] = keras_layer['config']['strides'][1]
-    layer['padding'] = keras_layer['config']['padding']

     (
         layer['out_height'],
@@ -84,7 +82,7 @@ def parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader):
         layer['pad_left'],
         layer['pad_right'],
     ) = compute_padding_2d(
-        layer['padding'],
+        keras_layer['config']['padding'],
         layer['in_height'],
         layer['in_width'],
         layer['stride_height'],
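
Since the parser no longer stores layer['padding'], the Keras padding string is consumed only at parse time and everything downstream sees the computed pad_* attributes. A sketch with illustrative dimensions (compute_padding_2d is assumed to come from hls4ml.converters.utils and is called positionally, as in the diff):

    from hls4ml.converters.utils import compute_padding_2d

    (out_height, out_width, pad_top, pad_bottom, pad_left, pad_right) = compute_padding_2d(
        'same',  # keras_layer['config']['padding']
        28, 28,  # in_height, in_width
        1, 1,    # stride_height, stride_width
        3, 3,    # filt_height, filt_width
    )
    print(out_height, out_width, pad_top, pad_bottom, pad_left, pad_right)
    # expected: 28 28 1 1 1 1, since 'same' with a 3x3 kernel and stride 1 pads one pixel per side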
