Skip to content

Commit 290f2c8

Browse files
committed
Merge branch 'main' of https://github.com/fastmachinelearning/hls4ml into fifo_depth_merge
2 parents d072262 + a3cb4de commit 290f2c8

23 files changed

+267
-274
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
A package for machine learning inference in FPGAs. We create firmware implementations of machine learning algorithms using high-level synthesis (HLS). We translate traditional open-source machine learning package models into HLS that can be configured for your use-case!
1010

11-
**Contact:** hls4ml.help@gmail.com
11+
If you have any questions, comments, or ideas regarding hls4ml or just want to show us how you use hls4ml, don't hesitate to reach us through the [discussions](https://github.com/fastmachinelearning/hls4ml/discussions) tab.
1212

1313
# Documentation & Tutorial
1414

docs/index.rst

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -35,9 +35,7 @@ Welcome to hls4ml's documentation!
3535

3636
``hls4ml`` is a Python package for machine learning inference in FPGAs. We create firmware implementations of machine learning algorithms using high-level synthesis (HLS). We translate traditional open-source machine learning package models into HLS that can be configured for your use-case!
3737

38-
The project is currently in development, so please let us know if you are interested, your experiences with the package, and if you would like new features to be added.
39-
40-
Contact: hls4ml.help@gmail.com
38+
The project is currently in development, so please let us know if you are interested, your experiences with the package, and if you would like new features to be added. You can reach us through our GitHub page.
4139

4240

4341
Project Status

hls4ml/backends/backend.py

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -25,14 +25,12 @@ def _init_class_optimizers(self):
2525
return class_optimizers
2626

2727
def _init_file_optimizers(self):
28-
opt_path = os.path.dirname(inspect.getfile(self.__class__)) + '/passes'
29-
module_path = self.__module__[:self.__module__.rfind('.')] + '.passes'
30-
file_optimizers = extract_optimizers_from_path(opt_path, module_path, self)
31-
for base in self.__class__.__bases__:
32-
opt_path = os.path.dirname(inspect.getfile(base)) + '/passes'
33-
module_path = base.__module__[:base.__module__.rfind('.')] + '.passes'
34-
base_optimizers = extract_optimizers_from_path(opt_path, module_path, self)
35-
file_optimizers.update(base_optimizers)
28+
file_optimizers = {}
29+
for cls in [*self.__class__.__bases__, self.__class__]:
30+
opt_path = os.path.dirname(inspect.getfile(cls)) + '/passes'
31+
module_path = cls.__module__[:cls.__module__.rfind('.')] + '.passes'
32+
cls_optimizers = extract_optimizers_from_path(opt_path, module_path, self)
33+
file_optimizers.update(cls_optimizers)
3634
return file_optimizers
3735

3836
def _get_layer_initializers(self):

hls4ml/backends/fpga/fpga_types.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -184,11 +184,11 @@ def definition_cpp(self, name_suffix='', as_reference=False):
184184

185185
class VivadoArrayVariableDefinition(VariableDefinition):
186186
def definition_cpp(self, name_suffix='', as_reference=False):
187-
return '{type} {name}{suffix}[{shape}]'.format(type=self.type.name, name=self.cppname, suffix=name_suffix, shape=self.size_cpp())
187+
return '{type} {name}{suffix}[{shape}]'.format(type=self.type.name, name=self.name, suffix=name_suffix, shape=self.size_cpp())
188188

189189
class QuartusArrayVariableDefinition(VariableDefinition):
190190
def definition_cpp(self, name_suffix='', as_reference=False):
191-
return '{type} {name}{suffix}[{shape}] {pragma}'.format(type=self.type.name, name=self.cppname, suffix=name_suffix, shape=self.size_cpp(), pragma=self.pragma)
191+
return '{type} {name}{suffix}[{shape}] {pragma}'.format(type=self.type.name, name=self.name, suffix=name_suffix, shape=self.size_cpp(), pragma=self.pragma)
192192

193193
class ArrayVariableConverter(object):
194194
def __init__(self, type_converter, prefix, definition_cls):
@@ -254,9 +254,9 @@ def __init__(self, type_converter):
254254
class VivadoStreamVariableDefinition(VariableDefinition):
255255
def definition_cpp(self, name_suffix='', as_reference=False):
256256
if as_reference: # Function parameter
257-
return 'hls::stream<{type}> &{name}{suffix}'.format(type=self.type.name, name=self.cppname, suffix=name_suffix)
257+
return 'hls::stream<{type}> &{name}{suffix}'.format(type=self.type.name, name=self.name, suffix=name_suffix)
258258
else: # Declaration
259-
return 'hls::stream<{type}> {name}{suffix}("{name}")'.format(type=self.type.name, name=self.cppname, suffix=name_suffix)
259+
return 'hls::stream<{type}> {name}{suffix}("{name}")'.format(type=self.type.name, name=self.name, suffix=name_suffix)
260260

261261
class StreamVariableConverter(object):
262262
def __init__(self, type_converter, prefix, definition_cls):
@@ -315,7 +315,7 @@ def __init__(self, type_converter):
315315

316316
class StaticWeightVariableDefinition(VariableDefinition):
317317
def definition_cpp(self, name_suffix='', as_reference=False):
318-
return '{type} {name}[{size}]'.format(type=self.type.name, name=self.cppname, size=self.data_length)
318+
return '{type} {name}[{size}]'.format(type=self.type.name, name=self.name, size=self.data_length)
319319

320320
class StaticWeightVariableConverter(object):
321321
def __init__(self, type_converter):

hls4ml/backends/vivado/passes/recurrent_templates.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,8 @@
1717
typedef {bias_t.name} bias_t;
1818
typedef {weight_t.name} weight_t;
1919
typedef ap_{index_t} index_t;
20-
template<class x_T, class y_T, class res_T>
21-
using product = nnet::product::{product_type}<x_T, y_T, res_T>;
20+
template<class x_T, class y_T>
21+
using product = nnet::product::{product_type}<x_T, y_T>;
2222
}};\n"""
2323

2424
#activation templates

hls4ml/backends/vivado/passes/transform_types.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,6 @@ def transform(self, model, node):
2020
new_var = self.inplace_var_converter.convert(var, io_type)
2121
if io_type == 'io_stream':
2222
new_var = self.stream_var_converter.convert(var)
23-
elif io_type == 'io_serial':
24-
new_var = self.array_var_converter.convert(var, pragma='stream')
2523
elif io_type == 'io_parallel':
2624
if node.name in node.model.inputs:
2725
new_var = self.array_var_converter.convert(var, pragma='reshape')

hls4ml/backends/vivado/vivado_backend.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -247,9 +247,6 @@ def init_lstm(self, layer):
247247
reuse_factor = layer.model.config.get_reuse_factor(layer)
248248
layer.set_attr('recurrent_reuse_factor', reuse_factor)
249249

250-
recurrent_bias = np.zeros(layer.weights['recurrent_weight'].shape[1])
251-
layer.add_weights_variable(name='recurrent_bias', var_name='br{index}', data=recurrent_bias)
252-
253250
index_t = IntegerPrecisionType(width=1, signed=False)
254251

255252
if 'table_t' not in layer.attributes:
@@ -273,9 +270,6 @@ def init_gru(self, layer):
273270
reuse_factor = layer.model.config.get_reuse_factor(layer)
274271
layer.set_attr('recurrent_reuse_factor', reuse_factor)
275272

276-
recurrent_bias = np.zeros(layer.weights['recurrent_weight'].shape[1])
277-
layer.add_weights_variable(name='recurrent_bias', var_name='br{index}', data=recurrent_bias)
278-
279273
index_t = IntegerPrecisionType(width=1, signed=False)
280274

281275
if 'table_t' not in layer.attributes:

hls4ml/converters/__init__.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,7 @@ def convert_from_keras_model(model, output_dir='my-hls-test', project_name='mypr
188188
clock_period (int, optional): Clock period of the design.
189189
Defaults to 5.
190190
io_type (str, optional): Type of implementation used. One of
191-
'io_parallel' or 'io_serial'. Defaults to 'io_parallel'.
191+
'io_parallel' or 'io_stream'. Defaults to 'io_parallel'.
192192
hls_config (dict, optional): The HLS config.
193193
kwargs** (dict, optional): Additional parameters that will be used to create the config of the specified backend
194194
Raises:
@@ -246,7 +246,7 @@ def convert_from_pytorch_model(model, input_shape, output_dir='my-hls-test', pro
246246
clock_period (int, optional): Clock period of the design.
247247
Defaults to 5.
248248
io_type (str, optional): Type of implementation used. One of
249-
'io_parallel' or 'io_serial'. Defaults to 'io_parallel'.
249+
'io_parallel' or 'io_stream'. Defaults to 'io_parallel'.
250250
hls_config (dict, optional): The HLS config.
251251
kwargs** (dict, optional): Additional parameters that will be used to create the config of the specified backend
252252
@@ -319,7 +319,7 @@ def convert_from_onnx_model(model, output_dir='my-hls-test', project_name='mypro
319319
clock_period (int, optional): Clock period of the design.
320320
Defaults to 5.
321321
io_type (str, optional): Type of implementation used. One of
322-
'io_parallel' or 'io_serial'. Defaults to 'io_parallel'.
322+
'io_parallel' or 'io_stream'. Defaults to 'io_parallel'.
323323
hls_config (dict, optional): The HLS config.
324324
kwargs** (dict, optional): Additional parameters that will be used to create the config of the specified backend
325325

hls4ml/converters/keras/reshaping.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,8 @@ def parse_zeropadding1d_layer(keras_layer, input_names, input_shapes, data_reade
2626
]
2727
layer['out_width'] = output_shape[2]
2828
layer['n_chan'] = output_shape[1]
29+
30+
layer['in_width'] = input_shapes[0][2]
2931
else:
3032
output_shape = [
3133
input_shapes[0][0], # Batch
@@ -35,6 +37,8 @@ def parse_zeropadding1d_layer(keras_layer, input_names, input_shapes, data_reade
3537
layer['out_width'] = output_shape[1]
3638
layer['n_chan'] = output_shape[2]
3739

40+
layer['in_width'] = input_shapes[0][1]
41+
3842
return layer, output_shape
3943

4044
@keras_handler('ZeroPadding2D')
@@ -74,6 +78,9 @@ def parse_zeropadding2d_layer(keras_layer, input_names, input_shapes, data_reade
7478
layer['out_height'] = output_shape[2]
7579
layer['out_width'] = output_shape[3]
7680
layer['n_chan'] = output_shape[1]
81+
82+
layer['in_height'] = input_shapes[0][2]
83+
layer['in_width'] = input_shapes[0][3]
7784
else:
7885
output_shape = [
7986
input_shapes[0][0], # Batch
@@ -85,4 +92,7 @@ def parse_zeropadding2d_layer(keras_layer, input_names, input_shapes, data_reade
8592
layer['out_width'] = output_shape[2]
8693
layer['n_chan'] = output_shape[3]
8794

95+
layer['in_height'] = input_shapes[0][1]
96+
layer['in_width'] = input_shapes[0][2]
97+
8898
return layer, output_shape

hls4ml/model/layers.py

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -885,12 +885,17 @@ def initialize(self):
885885
self.add_output_variable(state_shape, state_dims, out_name=self.outputs[1], var_name='layer{index}_h', type_name='layer{index}_h_t')
886886
self.add_output_variable(state_shape, state_dims, out_name=self.outputs[2], var_name='layer{index}_c', type_name='layer{index}_c_t')
887887

888+
#weights
888889
self.add_weights()
889-
self.add_bias()
890890

891+
#recurrent weights
891892
recurrent_weight = self.model.get_weights_data(self.name, 'recurrent_kernel')
892893
self.add_weights_variable(name='recurrent_weight', var_name='wr{index}', data=recurrent_weight)
893894

895+
#biases
896+
biases = self.model.get_weights_data(self.name , 'bias')
897+
self.add_weights_variable(name='bias', var_name='b{index}', data=biases)
898+
894899
class LSTM(Layer):
895900
_expected_attributes = [
896901
Attribute('n_out'),
@@ -904,10 +909,12 @@ class LSTM(Layer):
904909
WeightAttribute('weight'),
905910
WeightAttribute('bias'),
906911
WeightAttribute('recurrent_weight'),
912+
WeightAttribute('recurrent_bias'),
907913

908914
TypeAttribute('weight'),
909915
TypeAttribute('bias'),
910916
TypeAttribute('recurrent_weight'),
917+
TypeAttribute('recurrent_bias'),
911918
]
912919

913920
def initialize(self):
@@ -926,12 +933,20 @@ def initialize(self):
926933
self.add_output_variable(state_shape, state_dims, out_name=self.outputs[1], var_name='layer{index}_h', type_name='layer{index}_h_t')
927934
self.add_output_variable(state_shape, state_dims, out_name=self.outputs[2], var_name='layer{index}_c', type_name='layer{index}_c_t')
928935

936+
#weights
929937
self.add_weights()
930-
self.add_bias()
931938

939+
#recurrent weights
932940
recurrent_weight = self.model.get_weights_data(self.name, 'recurrent_kernel')
933941
self.add_weights_variable(name='recurrent_weight', var_name='wr{index}', data=recurrent_weight)
934942

943+
#biases
944+
biases = self.model.get_weights_data(self.name , 'bias')
945+
self.add_weights_variable(name='bias', var_name='b{index}', data=biases)
946+
947+
recurrent_bias = np.zeros(recurrent_weight.shape[1])
948+
self.add_weights_variable(name='recurrent_bias', var_name='br{index}', data=recurrent_bias)
949+
935950
class GRU(Layer):
936951
_expected_attributes = [
937952
Attribute('n_out'),
@@ -946,10 +961,12 @@ class GRU(Layer):
946961
WeightAttribute('weight'),
947962
WeightAttribute('bias'),
948963
WeightAttribute('recurrent_weight'),
964+
WeightAttribute('recurrent_bias'),
949965

950966
TypeAttribute('weight'),
951967
TypeAttribute('bias'),
952968
TypeAttribute('recurrent_weight'),
969+
TypeAttribute('recurrent_bias'),
953970
]
954971

955972
def initialize(self):
@@ -968,12 +985,19 @@ def initialize(self):
968985
self.add_output_variable(state_shape, state_dims, out_name=self.outputs[1], var_name='layer{index}_h', type_name='layer{index}_h_t')
969986
self.add_output_variable(state_shape, state_dims, out_name=self.outputs[2], var_name='layer{index}_c', type_name='layer{index}_c_t')
970987

988+
#weights
971989
self.add_weights()
972-
self.add_bias()
973990

991+
#recurrent weights
974992
recurrent_weight = self.model.get_weights_data(self.name, 'recurrent_kernel')
975993
self.add_weights_variable(name='recurrent_weight', var_name='wr{index}', data=recurrent_weight)
976994

995+
#biases array is actually a 2-dim array of arrays (bias + recurrent bias)
996+
#both arrays have shape: n_units * 3 (z, r, h_cand)
997+
biases = self.model.get_weights_data(self.name , 'bias')
998+
self.add_weights_variable(name='bias', var_name='b{index}', data=biases[0])
999+
self.add_weights_variable(name='recurrent_bias', var_name='br{index}', data=biases[1])
1000+
9771001
class GarNet(Layer):
9781002
ref_impl = False
9791003

0 commit comments

Comments
 (0)