Skip to content

Commit ecda5c9

Browse files
committed
Default to 'auto' for pipeline style and move check to an optimizer
1 parent fbc4107 commit ecda5c9

File tree

6 files changed

+255
-68
lines changed

6 files changed

+255
-68
lines changed
Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
from hls4ml.model.layers import Conv1D, Conv2D
2+
from hls4ml.model.optimizer import ModelOptimizerPass
3+
4+
5+
class SetPipelineStyle(ModelOptimizerPass):
    """Model-level optimizer pass that resolves the 'auto' pipeline style.

    When ``model.config.pipeline_style`` is ``'auto'`` (or ``None``), this pass
    inspects the model (IO type, layer types, per-layer strategies) and picks
    either ``'pipeline'`` or ``'dataflow'``.  When the user set an explicit
    style, the pass only validates the HLS config against it and emits
    warnings for combinations known to produce poor QoR.
    """

    def __init__(self):
        pass

    def transform(self, model):
        """Resolve or validate the model's pipeline style.

        Returns:
            bool: True if the model config was changed, False otherwise.
        """
        # Guard against typos / unknown values; fall back to 'auto' so the
        # resolution logic below still runs.
        if model.config.pipeline_style not in ['auto', 'pipeline', 'dataflow']:
            print(
                f'WARNING: Pipeline style set to {model.config.pipeline_style}, valid values: auto, pipeline, dataflow. '
                'Using "auto".'
            )
            self._set_pipeline_style(model, 'auto')

        if model.config.pipeline_style is None or model.config.pipeline_style == 'auto':

            # The checks below are ordered by precedence: the first rule that
            # matches decides the style and ends the pass.
            if self._maybe_set_dataflow_io_stream(model):
                return True

            if self._maybe_set_dataflow_conv_layers(model):
                return True

            if self._maybe_set_dataflow_resource_strategy(model):
                return True

            if self._maybe_set_pipeline_unrolled_strategy(model):
                return True

            if self._maybe_set_pipeline_io_parallel(model):
                return True

            # No rule matched; pick the conservative default.
            self._set_safe_default_dataflow(model)
            return True
        else:
            # User chose an explicit style; warn about risky combinations
            # but do not override the choice.
            self._validate_hls_config(model)

        return False  # No model changes made

    def _set_pipeline_style(self, model, pipeline_style):
        # Could add logging here
        model.config.pipeline_style = pipeline_style

    def _maybe_set_dataflow_io_stream(self, model):
        """Use 'dataflow' for streaming IO. Returns True if style was set."""
        if model.config.get_config_value('IOType') == 'io_stream':
            self._set_pipeline_style(model, 'dataflow')
            return True

        return False

    def _maybe_set_dataflow_conv_layers(self, model):
        """Use 'dataflow' if any Conv1D/Conv2D layer is present.

        Convolution implementations require the dataflow style (see the
        warning emitted in _validate_hls_config). Returns True if set.
        """
        for layer in model.get_layers():
            if isinstance(layer, (Conv1D, Conv2D)):
                self._set_pipeline_style(model, 'dataflow')
                return True

        return False

    def _maybe_set_dataflow_resource_strategy(self, model):
        """Use 'dataflow' if any layer uses the 'Resource' strategy."""
        for layer in model.get_layers():
            if model.config.is_resource_strategy(layer):
                self._set_pipeline_style(model, 'dataflow')
                return True

        return False

    def _maybe_set_pipeline_unrolled_strategy(self, model):
        """Use 'pipeline' if any layer uses the 'unrolled' strategy.

        Also derives the pipeline initiation interval (II) from the largest
        reuse factor across all layers.
        """
        have_unrolled = False
        for layer in model.get_layers():
            if model.config.get_strategy(layer).lower() == 'unrolled':
                self._set_pipeline_style(model, 'pipeline')
                have_unrolled = True
                break

        if have_unrolled:
            # The slowest (largest-RF) layer bounds the achievable II.
            # NOTE(review): assumes every layer carries a 'reuse_factor'
            # attribute by this point in the flow — confirm against the
            # backend's init optimizers.
            model.config.pipeline_ii = max([int(layer.get_attr('reuse_factor')) for layer in model.get_layers()])

        return have_unrolled

    def _maybe_set_pipeline_io_parallel(self, model):
        """Use 'pipeline' for parallel IO. Returns True if style was set."""
        if model.config.get_config_value('IOType') == 'io_parallel':
            self._set_pipeline_style(model, 'pipeline')
            return True

        return False

    def _set_safe_default_dataflow(self, model):
        """Fall back to 'dataflow', the safe default, and warn the user."""
        print(
            'WARNING: Couldn\'t determine best pipeline style, defaulting to "DATAFLOW". '
            'Use "PipelineStyle" property to override.'
        )
        self._set_pipeline_style(model, 'dataflow')

    def _validate_hls_config(self, model):
        """Warn about config combinations known to hurt QoR with 'pipeline'.

        Only emits warnings; never modifies the model (the user's explicit
        choice of pipeline style is respected).
        """
        if model.config.pipeline_style.lower() == 'pipeline':
            if model.config.model_compression:
                print('WARNING: Compression enabled while pipeline style set to "pipeline".')
            if model.config.model_strategy.lower() == 'resource':
                print(
                    'WARNING: Model strategy "Resource" will lead to bad QoR in combination '
                    'with pipeline style set to "pipeline".'
                )
            if any(isinstance(layer, (Conv1D, Conv2D)) for layer in model.get_layers()):
                print('WARNING: Convolution layers require "dataflow" pipeline style.')
        for layer_type, strategy in model.config.layer_type_strategy.items():
            if strategy.lower() == 'resource' and model.config.pipeline_style.lower() == 'pipeline':
                print(
                    f'WARNING: Strategy for layer type {layer_type} set to "Resource", while pipeline style set to '
                    '"pipeline". This will lead to bad QoR.'
                )

        for layer_name, strategy in model.config.layer_name_strategy.items():
            if strategy.lower() == 'resource' and model.config.pipeline_style.lower() == 'pipeline':
                print(
                    'WARNING: Strategy for layer {} set to "Resource", while pipeline style set to "pipeline".'.format(
                        layer_name
                    )
                )

        for layer_type, compression in model.config.layer_type_compression.items():
            if compression and model.config.pipeline_style.lower() == 'pipeline':
                print(
                    'WARNING: Compression enabled for layer type {}, while pipeline style set to "pipeline".'.format(
                        layer_type
                    )
                )

        for layer_name, compression in model.config.layer_name_compression.items():
            if compression and model.config.pipeline_style.lower() == 'pipeline':
                print(f'WARNING: Compression enabled for layer {layer_name}, while pipeline style set to "pipeline".')

hls4ml/backends/vivado/vivado_backend.py

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,7 @@ def _register_flows(self):
114114
'vivado:apply_resource_strategy',
115115
'vivado:generate_conv_im2col',
116116
'vivado:generate_unrolled_dense_resource',
117+
'vivado:set_pipeline_style',
117118
]
118119
vivado_types_flow = register_flow('specific_types', vivado_types, requires=[init_flow], backend=self.name)
119120

@@ -247,11 +248,6 @@ def build(
247248

248249
return parse_vivado_report(model.config.get_output_dir())
249250

250-
def _validate_conv_strategy(self, layer):
251-
if layer.model.config.pipeline_style.lower() != 'dataflow':
252-
print(f'WARNING: Layer {layer.name} requires "dataflow" pipeline style. Switching to "dataflow" pipeline style.')
253-
layer.model.config.pipeline_style = 'dataflow'
254-
255251
@layer_optimizer(Layer)
256252
def init_base_layer(self, layer):
257253
reuse_factor = layer.model.config.get_reuse_factor(layer)
@@ -356,8 +352,6 @@ def init_conv1d(self, layer):
356352

357353
layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())
358354

359-
self._validate_conv_strategy(layer)
360-
361355
@layer_optimizer(SeparableConv1D)
362356
def init_sepconv1d(self, layer):
363357
if layer.model.config.is_resource_strategy(layer):
@@ -480,8 +474,6 @@ def init_conv2d(self, layer):
480474

481475
layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())
482476

483-
self._validate_conv_strategy(layer)
484-
485477
@layer_optimizer(SeparableConv2D)
486478
def init_sepconv2d(self, layer):
487479
if layer.model.config.is_resource_strategy(layer):
@@ -585,8 +577,10 @@ def init_lstm(self, layer):
585577
n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(layer)
586578
if use_resource_instead:
587579
self.set_closest_reuse_factor(layer, n_in, n_out)
580+
self.set_closest_reuse_factor(layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor')
588581
layer.set_attr('strategy', 'resource')
589582
else:
583+
self.set_closest_reuse_factor(layer, n_in, n_out, include_max_rf=False)
590584
self.set_closest_reuse_factor(
591585
layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor', include_max_rf=False
592586
)
@@ -617,8 +611,10 @@ def init_gru(self, layer):
617611
n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(layer)
618612
if use_resource_instead:
619613
self.set_closest_reuse_factor(layer, n_in, n_out)
614+
self.set_closest_reuse_factor(layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor')
620615
layer.set_attr('strategy', 'resource')
621616
else:
617+
self.set_closest_reuse_factor(layer, n_in, n_out, include_max_rf=False)
622618
self.set_closest_reuse_factor(
623619
layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor', include_max_rf=False
624620
)

hls4ml/model/graph.py

Lines changed: 4 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,8 @@ def __init__(self, config):
4949

5050
self.trace_output = self.get_config_value('TraceOutput', False)
5151

52-
self.pipeline_style = 'pipeline'
52+
self.pipeline_style = 'auto'
53+
self.pipeline_ii = None
5354

5455
if 'WriterConfig' in self.config:
5556
self.writer_config = self.config['WriterConfig']
@@ -61,7 +62,6 @@ def __init__(self, config):
6162
}
6263

6364
self._parse_hls_config()
64-
self._validate_hls_config()
6565

6666
def get_config_value(self, key, default=None):
6767
return self.config.get(key, default)
@@ -267,7 +267,8 @@ def _parse_hls_config(self):
267267
self.model_conv_implementation = model_cfg.get('ConvImplementation', 'LineBuffer')
268268
self.model_strategy = model_cfg.get('Strategy', 'Latency')
269269
self.model_compression = bool(model_cfg.get('Compression', 0))
270-
self.pipeline_style = model_cfg.get('PipelineStyle', 'pipeline')
270+
self.pipeline_style = model_cfg.get('PipelineStyle', 'auto')
271+
self.pipeline_ii = model_cfg.get('PipelineInterval', None)
271272

272273
layer_type_cfg = hls_config.get('LayerType')
273274
if layer_type_cfg is not None:
@@ -304,50 +305,6 @@ def _parse_hls_config(self):
304305
for layer_name, layer_cfg in layer_name_cfg.items():
305306
self.parse_name_config(layer_name, layer_cfg)
306307

307-
def _validate_hls_config(self):
308-
use_dataflow = False
309-
if self.pipeline_style.lower() == 'pipeline' and self.model_compression:
310-
print('WARNING: Compression enabled while pipeline style set to "pipeline".')
311-
use_dataflow = True
312-
for layer_type, strategy in self.layer_type_strategy.items():
313-
if strategy.lower() == 'resource' and self.pipeline_style.lower() == 'pipeline':
314-
print(
315-
'WARNING: Strategy for layer type {} set to "Resource", while pipeline style set to "pipeline".'.format(
316-
layer_type
317-
)
318-
)
319-
use_dataflow = True
320-
321-
for layer_name, strategy in self.layer_name_strategy.items():
322-
if strategy.lower() == 'resource' and self.pipeline_style.lower() == 'pipeline':
323-
print(
324-
'WARNING: Strategy for layer {} set to "Resource", while pipeline style set to "pipeline".'.format(
325-
layer_name
326-
)
327-
)
328-
use_dataflow = True
329-
330-
for layer_type, compression in self.layer_type_compression.items():
331-
if compression and self.pipeline_style.lower() == 'pipeline':
332-
print(
333-
'WARNING: Compression enabled for layer type {}, while pipeline style set to "pipeline".'.format(
334-
layer_type
335-
)
336-
)
337-
use_dataflow = True
338-
339-
for layer_name, compression in self.layer_name_compression.items():
340-
if compression and self.pipeline_style.lower() == 'pipeline':
341-
print(f'WARNING: Compression enabled for layer {layer_name}, while pipeline style set to "pipeline".')
342-
use_dataflow = True
343-
344-
if self.model_strategy.lower() == 'resource':
345-
use_dataflow = True
346-
347-
if use_dataflow:
348-
print('WARNING: Changing pipeline style to "dataflow".')
349-
self.pipeline_style = 'dataflow'
350-
351308

352309
class ModelGraph:
353310
"""The ModelGraph represents the network that is being processed by hls4ml.

hls4ml/writer/vivado_writer.py

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,15 @@ def write_project_cpp(self, model):
199199
all_inputs = [i.name for i in model_inputs]
200200
all_outputs = [o.name for o in model_outputs]
201201
all_brams = [b.name for b in model_brams]
202-
io_type = model.config.get_config_value("IOType")
202+
io_type = model.config.get_config_value('IOType')
203+
204+
pipeline_style = model.config.pipeline_style
205+
pipeline_ii = model.config.pipeline_ii
206+
pipeline_pragma = indent + f'#pragma HLS {pipeline_style.upper()}'
207+
if pipeline_style == 'pipeline' and pipeline_ii is not None:
208+
pipeline_pragma += f' II={pipeline_ii}\n'
209+
else:
210+
pipeline_pragma += '\n'
203211

204212
if io_type == 'io_parallel':
205213
for i in model_inputs:
@@ -211,23 +219,15 @@ def write_project_cpp(self, model):
211219
newline += indent + '#pragma HLS INTERFACE ap_vld port={},{} \n'.format(
212220
','.join(all_inputs), ','.join(all_outputs)
213221
)
222+
newline += pipeline_pragma
214223

215-
model_cfg = model.config.get_config_value('HLSConfig')['Model']
216-
if model_cfg.get('Strategy', 'latency').lower() == 'unrolled':
217-
max_rf = max([int(layer.get_attr('reuse_factor')) for layer in model.get_layers()])
218-
newline += indent + f'#pragma HLS PIPELINE II={max_rf} \n'
219-
else:
220-
if model.config.pipeline_style.lower() == 'dataflow':
221-
newline += indent + '#pragma HLS DATAFLOW \n'
222-
else:
223-
newline += indent + '#pragma HLS PIPELINE \n'
224224
if io_type == 'io_stream':
225225
newline += indent + '#pragma HLS INTERFACE axis port={},{} \n'.format(
226226
','.join(all_inputs), ','.join(all_outputs)
227227
)
228228
if all_brams:
229229
newline += indent + '#pragma HLS INTERFACE bram port={} \n'.format(','.join(all_brams))
230-
newline += indent + '#pragma HLS DATAFLOW \n'
230+
newline += pipeline_pragma
231231

232232
elif '// hls-fpga-machine-learning insert layers' in line:
233233
newline = line + '\n'

test/pytest/test_dense_unrolled.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ def test_dense_unrolled_streaming_conv(dim, io_type, reuse_factor):
8484
@pytest.mark.parametrize('backend', ['Vitis', 'Vivado'])
8585
@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
8686
@pytest.mark.parametrize('static', [True, False])
87-
@pytest.mark.parametrize('reuse_factor', [1, 4, 32, 128]) # These should be enough
87+
@pytest.mark.parametrize('reuse_factor', [1, 4, 32, 128]) # RF=128 also tests if setting closest RF works well
8888
def test_rnn_unrolled(rnn_layer, backend, io_type, static, reuse_factor):
8989
# Subtract 0.5 to include negative values
9090
input_shape = (12, 8)
@@ -118,6 +118,10 @@ def test_rnn_unrolled(rnn_layer, backend, io_type, static, reuse_factor):
118118
hls_model = convert_from_keras_model(
119119
keras_model, hls_config=hls_config, output_dir=output_dir, backend=backend, io_type=io_type
120120
)
121+
122+
# Check if strategy was not overridden
123+
assert list(hls_model.get_layers())[1].get_attr('strategy') == 'unrolled' if reuse_factor > 1 else 'latency'
124+
121125
hls_model.compile()
122126

123127
keras_prediction = keras_model.predict(X)

0 commit comments

Comments
 (0)