Commit 3b9c5e6

fix merge conflicts in RNN parsing
2 parents ed3eaa4 + 4b4b5a0

25 files changed: +669 −137 lines

hls4ml/backends/vivado/passes/convolution_templates.py
Lines changed: 19 additions & 2 deletions

@@ -1,6 +1,14 @@
 from hls4ml.backends.backend import get_backend
 from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
-from hls4ml.model.layers import Conv1D, Conv2D, Conv2DBatchnorm, DepthwiseConv2D, SeparableConv1D, SeparableConv2D
+from hls4ml.model.layers import (
+    Conv1D,
+    Conv2D,
+    Conv2DBatchnorm,
+    DepthwiseConv1D,
+    DepthwiseConv2D,
+    SeparableConv1D,
+    SeparableConv2D,
+)
 
 # Shared multiplication template
 
@@ -52,13 +60,16 @@
 const ap_uint<config{index}::filt_width> config{index}::pixels[] = {{{instructions}}};\n"""
 
 conv1d_function_template = 'nnet::conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
+depthconv1d_function_template = (
+    'nnet::depthwise_conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
+)
 
 conv1d_include_list = ['nnet_utils/nnet_conv1d.h', 'nnet_utils/nnet_conv1d_stream.h']
 
 
 class Conv1DConfigTemplate(LayerConfigTemplate):
     def __init__(self):
-        super().__init__(Conv1D)
+        super().__init__((Conv1D, DepthwiseConv1D))
         self.template = conv1d_config_template
         self.mult_template = conv_mult_config_template
 
@@ -106,6 +117,12 @@ def format(self, node):
         return self.template.format(**params)
 
 
+class DepthwiseConv1DFunctionTemplate(Conv1DFunctionTemplate):
+    def __init__(self):
+        super(Conv1DFunctionTemplate, self).__init__(DepthwiseConv1D, include_header=sepconv1d_include_list)
+        self.template = depthconv1d_function_template
+
+
 # Conv2D Templates
 
 conv2d_config_template = """struct config{index} : nnet::conv2d_config {{
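
Note: these templates wire DepthwiseConv1D into the same config/function machinery as Conv1D. A minimal sketch of exercising the new path through the standard hls4ml Keras flow; layer sizes, output directory, and backend choice are illustrative:

# Minimal sketch, assuming the standard hls4ml Keras conversion flow.
import hls4ml
from tensorflow.keras.layers import DepthwiseConv1D
from tensorflow.keras.models import Sequential

model = Sequential([DepthwiseConv1D(kernel_size=3, input_shape=(16, 4))])
config = hls4ml.utils.config_from_keras_model(model, granularity='name')
hls_model = hls4ml.converters.convert_from_keras_model(
    model, hls_config=config, output_dir='hls_dw1d', backend='Vivado'
)
hls_model.compile()  # generated HLS should call nnet::depthwise_conv_1d_* via the template above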

hls4ml/converters/keras/convolution.py
Lines changed: 10 additions & 5 deletions

@@ -2,7 +2,7 @@
 from hls4ml.converters.utils import compute_padding_1d, compute_padding_2d, parse_data_format
 
 
-@keras_handler('Conv1D', 'SeparableConv1D')
+@keras_handler('Conv1D', 'SeparableConv1D', 'DepthwiseConv1D')
 def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader):
     assert 'Conv1D' in keras_layer['class_name']
 
@@ -12,14 +12,19 @@ def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader):
 
     if layer['class_name'] in ['Conv1D', 'QConv1D']:
         layer['weight_data'] = get_weights_data(data_reader, layer['name'], 'kernel')
-    else:  # SeparableConv1D
-        layer['depthwise_data'], layer['pointwise_data'], layer['bias_data'] = get_weights_data(
-            data_reader, layer['name'], ['depthwise_kernel', 'pointwise_kernel', 'bias']
+    elif layer['class_name'] in ['SeparableConv1D', 'QSeparableConv1D']:
+        layer['depthwise_data'], layer['pointwise_data'] = get_weights_data(
+            data_reader, layer['name'], ['depthwise_kernel', 'pointwise_kernel']
         )
+    else:  # DepthwiseConv1D
+        layer['depthwise_data'] = get_weights_data(data_reader, layer['name'], 'depthwise_kernel')
 
     layer['bias_data'] = get_weights_data(data_reader, layer['name'], 'bias')
 
-    layer['n_filt'] = keras_layer['config']['filters']
+    if 'filters' in keras_layer['config']:
+        layer['n_filt'] = keras_layer['config']['filters']
+    else:
+        layer['n_filt'] = layer['n_chan']
     layer['filt_width'] = keras_layer['config']['kernel_size'][0]
    layer['stride_width'] = keras_layer['config']['strides'][0]
     layer['padding'] = keras_layer['config']['padding']
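
Note: the n_filt fallback exists because a depthwise layer's Keras config carries no 'filters' entry; the output channel count equals the input channel count. A hypothetical config fragment illustrating the shape of the input:

# Hypothetical Keras config fragment; field values are illustrative.
keras_layer = {
    'class_name': 'DepthwiseConv1D',
    'config': {
        'name': 'dw1d',
        'kernel_size': (3,),
        'strides': (1,),
        'padding': 'same',
        # no 'filters' key, unlike Conv1D/SeparableConv1D -> n_filt falls back to n_chan
    },
}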

hls4ml/converters/keras/qkeras.py
Lines changed: 13 additions & 0 deletions

@@ -49,6 +49,19 @@ def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader):
     return layer, output_shape
 
 
+@keras_handler('QDepthwiseConv2D')
+def parse_qdepthwiseqconv_layer(keras_layer, input_names, input_shapes, data_reader):
+    layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)
+
+    layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
+    if keras_layer['config']['bias_quantizer'] is not None:
+        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
+    else:
+        layer['bias_quantizer'] = None
+
+    return layer, output_shape
+
+
 @keras_handler('QActivation')
 def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader):
     assert keras_layer['class_name'] == 'QActivation'
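
Note: the handler reuses parse_conv2d_layer and then attaches the QKeras quantizers. A sketch of a model it should accept; quantizer settings and shapes are illustrative:

# Sketch, assuming the usual QKeras API; bit widths are illustrative.
from qkeras import QDepthwiseConv2D, quantized_bits
from tensorflow.keras.models import Sequential

model = Sequential([
    QDepthwiseConv2D(
        kernel_size=(3, 3),
        depthwise_quantizer=quantized_bits(8, 0, alpha=1),
        bias_quantizer=quantized_bits(8, 0, alpha=1),
        input_shape=(8, 8, 3),
    )
])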

hls4ml/converters/keras_to_hls.py
Lines changed: 1 addition & 1 deletion

@@ -301,7 +301,7 @@ def parse_keras_model(model_arch, reader):
             act_layer['class_name'] = 'QActivation'
             act_layer['config'] = {
                 'name': layer['name'] + '_' + act_details['class_name'],
-                'activation': act_details['class_name'],
+                'activation': act_details,
             }
             act_layer, output_shape = layer_handlers['QActivation'](act_layer, None, [output_shape], reader)
         else:
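
Note: passing the whole act_details dictionary (rather than only its class name) lets the QActivation handler recover the quantizer parameters. A hypothetical value, with illustrative fields:

# Hypothetical act_details for a QKeras activation embedded in a layer config.
act_details = {'class_name': 'quantized_relu', 'config': {'bits': 8, 'integer': 0}}

# Before: act_layer['config']['activation'] == 'quantized_relu'  (quantizer parameters dropped)
# After:  act_layer['config']['activation'] == act_details       (full descriptor preserved)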

hls4ml/converters/pytorch/convolution.py
Lines changed: 15 additions & 5 deletions

@@ -1,4 +1,4 @@
-from hls4ml.converters.pytorch_to_hls import get_weights_data, pytorch_handler
+from hls4ml.converters.pytorch_to_hls import pytorch_handler
 from hls4ml.converters.utils import compute_padding_1d_pytorch, compute_padding_2d_pytorch, parse_data_format
 
 
@@ -9,11 +9,16 @@ def parse_conv1d_layer(operation, layer_name, input_names, input_shapes, node, c
     layer = {}
 
     layer['name'] = layer_name
+    layer['inputs'] = input_names
     layer['class_name'] = 'Conv1D'
     layer['data_format'] = 'channels_first'  # Pytorch default (can't change)
 
-    layer['weight_data'] = get_weights_data(data_reader, layer['name'], 'weight')
-    layer['bias_data'] = get_weights_data(data_reader, layer['name'], 'bias')
+    layer['weight_data'] = class_object.weight.data.numpy()
+    if class_object.bias is not None:
+        layer['bias_data'] = class_object.bias.data.numpy()
+    else:
+        layer['bias_data'] = None
+
     # Input info
     (layer['in_width'], layer['n_chan']) = parse_data_format(
         input_shapes[0], 'channels_first'
@@ -54,11 +59,16 @@ def parse_conv2d_layer(operation, layer_name, input_names, input_shapes, node, c
     layer = {}
 
     layer['name'] = layer_name
+    layer['inputs'] = input_names
     layer['class_name'] = 'Conv2D'
     layer['data_format'] = 'channels_first'  # Pytorch default (can't change)
 
-    layer['weight_data'] = get_weights_data(data_reader, layer['name'], 'weight')
-    layer['bias_data'] = get_weights_data(data_reader, layer['name'], 'bias')
+    layer['weight_data'] = class_object.weight.data.numpy()
+    if class_object.bias is not None:
+        layer['bias_data'] = class_object.bias.data.numpy()
+    else:
+        layer['bias_data'] = None
+
     # Input info
     (layer['in_height'], layer['in_width'], layer['n_chan']) = parse_data_format(
         input_shapes[0], 'channels_first'
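
Note: weights now come straight from the traced module object instead of a data_reader lookup, which also makes the bias=False case explicit. A small sketch with an illustrative Conv1d:

# Sketch of what the handler reads; module sizes are illustrative.
import torch.nn as nn

conv = nn.Conv1d(in_channels=4, out_channels=8, kernel_size=3, bias=False)
weight = conv.weight.data.numpy()  # read directly from the module
bias = conv.bias.data.numpy() if conv.bias is not None else None  # None here, since bias=False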

hls4ml/converters/pytorch/core.py
Lines changed: 16 additions & 8 deletions

@@ -1,4 +1,4 @@
-from hls4ml.converters.pytorch_to_hls import get_weights_data, pytorch_handler
+from hls4ml.converters.pytorch_to_hls import pytorch_handler
 
 
 @pytorch_handler('Linear')
@@ -9,8 +9,14 @@ def parse_linear_layer(operation, layer_name, input_names, input_shapes, node, c
 
     layer['class_name'] = 'Dense'
     layer['name'] = layer_name
+    layer['inputs'] = input_names
+
+    layer['weight_data'] = class_object.weight.data.numpy()
+    if class_object.bias is not None:
+        layer['bias_data'] = class_object.bias.data.numpy()
+    else:
+        layer['bias_data'] = None
 
-    layer['weight_data'], layer['bias_data'] = get_weights_data(data_reader, layer['name'], ['weight', 'bias'])
     if class_object is not None:
         layer['n_in'] = class_object.in_features
         layer['n_out'] = class_object.out_features
@@ -39,6 +45,7 @@ def parse_activation_layer(operation, layer_name, input_names, input_shapes, nod
     layer['class_name'] = operation
     layer['activation'] = layer['class_name']
     layer['name'] = layer_name
+    layer['inputs'] = input_names
 
     # if layer['class_name'] != 'Activation':
     #     layer['activation'] = layer['class_name']
@@ -50,7 +57,7 @@ def parse_activation_layer(operation, layer_name, input_names, input_shapes, nod
     if layer['class_name'] == 'ELU':
         layer['activ_param'] = class_object.alpha
     if layer['class_name'] == 'PReLU':
-        layer['alpha_data'] = get_weights_data(data_reader, layer['name'], 'weight')
+        layer['alpha_data'] = class_object.weight.data.numpy()
     if layer['class_name'] == 'Threshold':
         layer['activ_param'] = class_object.threshold
         layer['class_name'] = 'ThresholdedReLU'
@@ -92,25 +99,26 @@ def parse_batchnorm_layer(operation, layer_name, input_names, input_shapes, node
     layer['class_name'] = 'BatchNormalization'
     layer['data_format'] = 'channels_first'
     layer['name'] = layer_name
+    layer['inputs'] = input_names
 
     # batchnorm para
     if node.op == 'call_module':
         layer['epsilon'] = class_object.eps
         layer['use_gamma'] = layer['use_beta'] = class_object.affine
 
         if layer['use_gamma']:
-            layer['gamma_data'] = get_weights_data(data_reader, layer['name'], 'weight')
+            layer['gamma_data'] = class_object.weight.data.numpy()
         else:
             layer['gamma_data'] = 1
 
         if layer['use_beta']:
-            layer['beta_data'] = get_weights_data(data_reader, layer['name'], 'bias')
+            layer['beta_data'] = class_object.bias.data.numpy()
         else:
             layer['beta_data'] = 0
 
-        layer['mean_data'], layer['variance_data'] = get_weights_data(
-            data_reader, layer['name'], ['running_mean', 'running_var']
-        )
+        layer['mean_data'] = class_object.running_mean.data.numpy()
+        layer['variance_data'] = class_object.running_var.data.numpy()
+
     in_size = 1
     for dim in input_shapes[0][1:]:
         in_size *= dim
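
Note: the same pattern applies to batchnorm: gamma/beta and the running statistics are read off the module directly. A sketch with an illustrative BatchNorm1d:

# Sketch of the attributes the batchnorm handler reads; num_features is illustrative.
import torch.nn as nn

bn = nn.BatchNorm1d(num_features=16)  # affine=True by default -> use_gamma/use_beta
gamma = bn.weight.data.numpy()        # scale
beta = bn.bias.data.numpy()           # shift
mean = bn.running_mean.data.numpy()   # running statistics used at inference
var = bn.running_var.data.numpy()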

hls4ml/converters/pytorch/pooling.py
Lines changed: 1 addition & 0 deletions

@@ -20,6 +20,7 @@ def parse_pooling_layer(operation, layer_name, input_names, input_shapes, node,
         layer['class_name'] = 'AveragePooling2D'
 
     layer['name'] = layer_name
+    layer['inputs'] = input_names
     layer['data_format'] = 'channels_first'  # Pytorch default (can't change)
     if node.op == 'call_module' and 'Avg' in operation:
         if class_object.count_include_pad:

hls4ml/converters/pytorch/recurrent.py
Lines changed: 5 additions & 7 deletions

@@ -2,7 +2,7 @@
 
 import numpy as np
 
-from hls4ml.converters.pytorch_to_hls import get_weights_data, pytorch_handler
+from hls4ml.converters.pytorch_to_hls import pytorch_handler
 
 rnn_layers = ['RNN', 'LSTM', 'GRU']
 
@@ -55,12 +55,10 @@ def parse_rnn_layer(operation, layer_name, input_names, input_shapes, node, clas
     if class_object.dropout > 0:
         raise Exception('hls4ml does not support RNNs with dropout')
 
-    (
-        layer['weight_data'],
-        layer['recurrent_weight_data'],
-        layer['bias_data'],
-        layer['recurrent_bias_data'],
-    ) = get_weights_data(data_reader, layer['name'], ['weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0'])
+    layer['weight_data'] = class_object.weight_ih_l0.data.numpy()
+    layer['recurrent_weight_data'] = class_object.weight_hh_l0.data.numpy()
+    layer['bias_data'] = class_object.bias_ih_l0.data.numpy()
+    layer['recurrent_bias_data'] = class_object.bias_hh_l0.data.numpy()
 
     if class_object.bias is False:
         layer['bias_data'] = np.zeros(layer['weight_data'].shape[0])
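
Note: the RNN parser (the site of the merge conflict this commit resolves) now also pulls the layer-0 weight tensors off the module itself. A sketch with an illustrative LSTM:

# Sketch of the layer-0 parameter names the handler reads; sizes are illustrative.
import torch.nn as nn

lstm = nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
w_ih = lstm.weight_ih_l0.data.numpy()  # input-to-hidden, shape (4*hidden_size, input_size) for LSTM
w_hh = lstm.weight_hh_l0.data.numpy()  # hidden-to-hidden, shape (4*hidden_size, hidden_size)
b_ih = lstm.bias_ih_l0.data.numpy()
b_hh = lstm.bias_hh_l0.data.numpy()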

hls4ml/converters/pytorch/reshape.py
Lines changed: 79 additions & 0 deletions

@@ -12,6 +12,7 @@ def parse_reshape_layer(operation, layer_name, input_names, input_shapes, node,
     layer = {}
     layer['class_name'] = 'Reshape'
     layer['name'] = layer_name
+    layer['inputs'] = input_names
 
     layer['target_shape'] = [int(i) for i in node.args[1:]]
     # View can have -1 as one of the dimensions,
@@ -27,3 +28,81 @@ def parse_reshape_layer(operation, layer_name, input_names, input_shapes, node,
     output_shape = input_shapes[0][:1] + layer['target_shape']
 
     return layer, output_shape
+
+
+@pytorch_handler('squeeze')
+def parse_squeeze_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
+    assert operation == 'squeeze'
+
+    layer = {}
+    layer['class_name'] = 'Reshape'
+    layer['name'] = layer_name
+
+    if len(node.args) > 1 or len(node.kwargs) > 0:  # 'dim' argument is specified
+        output_shape = [i for i in input_shapes[0]]
+        squeeze_dim = node.kwargs.get('dim', None)
+        if squeeze_dim is None:
+            squeeze_dim = node.args[1]
+        if isinstance(squeeze_dim, tuple):
+            for dim in squeeze_dim:
+                del output_shape[dim]
+        else:
+            del output_shape[squeeze_dim]
+    else:
+        output_shape = [i for i in input_shapes[0] if i != 1]
+
+    layer['target_shape'] = output_shape.copy()
+    if layer['target_shape'][0] is None:
+        del layer['target_shape'][0]
+
+    return layer, output_shape
+
+
+@pytorch_handler('unsqueeze')
+def parse_unsqueeze_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
+    assert operation == 'unsqueeze'
+
+    layer = {}
+    layer['class_name'] = 'Reshape'
+    layer['name'] = layer_name
+    layer['inputs'] = input_names
+
+    # Unlike 'squeeze', 'unsqueeze' requires the 'dim' argument
+    output_shape = [i for i in input_shapes[0]]
+    if len(node.args) > 1:  # Specified as unsqueeze(x, n)
+        squeeze_dim = node.args[1]
+    else:  # Specified as unsqueeze(x, dim=n)
+        squeeze_dim = node.kwargs['dim']
+    # insert() adds an element before the given index, which is the location unsqueeze expects
+    index = output_shape.index(output_shape[squeeze_dim])  # + 1
+    output_shape.insert(index, 1)
+
+    layer['target_shape'] = output_shape.copy()
+    if layer['target_shape'][0] is None:
+        del layer['target_shape'][0]
+
+    return layer, output_shape
+
+
+@pytorch_handler('Flatten')
+def parse_flatten_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
+    assert operation == 'Flatten'
+
+    layer = {}
+    layer['class_name'] = 'Reshape'
+    layer['name'] = layer_name
+    layer['inputs'] = input_names
+
+    start_dim = class_object.start_dim
+    end_dim = class_object.end_dim
+    if end_dim + 1 == 0 or end_dim + 1 > len(input_shapes[0]):
+        end_dim = len(input_shapes[0])
+    else:
+        end_dim = end_dim + 1
+
+    layer['target_shape'] = (
+        input_shapes[0][0:start_dim] + [np.prod(input_shapes[0][start_dim:end_dim])] + input_shapes[0][end_dim:]
+    )
+    output_shape = layer['target_shape']
+
+    return layer, output_shape
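
Note: these handlers receive torch.fx graph nodes, so 'squeeze' and 'unsqueeze' arrive as call_function nodes (with dim in node.args or node.kwargs) while Flatten arrives as a call_module node. A sketch of how such a graph is produced; the model is illustrative:

# Sketch, assuming torch.fx symbolic tracing; model and shapes are illustrative.
import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()  # call_module node -> 'Flatten' handler

    def forward(self, x):
        x = torch.unsqueeze(x, dim=1)  # call_function node -> 'unsqueeze' handler (node.kwargs={'dim': 1})
        x = torch.squeeze(x, 1)        # call_function node -> 'squeeze' handler (node.args[1] == 1)
        return self.flatten(x)

traced = torch.fx.symbolic_trace(Net())
print([(n.op, n.target) for n in traced.graph.nodes])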
