Commit b55cd04

Author: Enrico Lupi
ADD pytest for Bidirectional layer
1 parent b65c730, commit b55cd04

1 file changed: +148 -109 lines changed

test/pytest/test_rnn.py

@@ -9,165 +9,204 @@
 
 test_root_path = Path(__file__).parent
 
-rnn_layers = [SimpleRNN, LSTM, GRU]
+rnn_layers = [SimpleRNN, LSTM, GRU, Bidirectional]
 
 
-@pytest.mark.parametrize('rnn_layer', rnn_layers)
-@pytest.mark.parametrize('return_sequences', [True, False])
-@pytest.mark.parametrize('bidirectional', [True, False])
-def test_rnn_parsing(rnn_layer, return_sequences, bidirectional):
-
-    if rnn_layer is SimpleRNN and bidirectional:
-        pytest.skip("SimpleRNN does not support bidirectional layers")
-
+def create_model_parsing(rnn_layer, return_sequences):
     time_steps = 3
     input_size = 8
     input_shape = (time_steps, input_size)
 
     model_input = Input(shape=input_shape)
-    if not bidirectional:
+    if rnn_layer.__name__ != 'Bidirectional':
         model_output = rnn_layer(64, return_sequences=return_sequences)(model_input)
     else:
-        model_output = Bidirectional(rnn_layer(64, return_sequences=return_sequences))(model_input)
+        forward_layer = LSTM(37, return_sequences=return_sequences)
+        backward_layer = GRU(27, return_sequences=return_sequences, go_backwards=True)
+        model_output = rnn_layer(forward_layer, backward_layer=backward_layer)(model_input)
 
     model = Model(model_input, model_output)
     model.compile(optimizer='adam', loss='mse')
 
-    config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend='Vivado')
-    prj_name = f'hls4mlprj_rnn_{rnn_layer.__class__.__name__.lower()}_seq_{int(return_sequences)}'
-    output_dir = str(test_root_path / prj_name)
-    hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir)
+    return model
 
-    hls_layer = list(hls_model.get_layers())[1]  # 0 is input, 1 is the RNN layer
-    keras_layer = model.layers[1]
 
-    # Basic sanity check, I/O, activations
-    if not bidirectional:
-        assert hls_layer.class_name == rnn_layer.__name__
+def compare_attributes(hls_layer, keras_layer):
+    assert hls_layer.class_name == keras_layer.__class__.__name__
+    assert hls_layer.get_input_variable().shape == list(keras_layer.input_shape)[1:]  # Ignore the batch size
+    assert hls_layer.get_output_variable().shape == list(keras_layer.output_shape)[1:]  # Ignore the batch size
+    if keras_layer.__class__.__name__ != 'Bidirectional':
         assert hls_layer.attributes['n_out'] == keras_layer.units
         assert hls_layer.attributes['activation'] == keras_layer.activation.__name__
         if 'recurrent_activation' in hls_layer.attributes:  # SimpleRNN doesn't have this
             assert hls_layer.attributes['recurrent_activation'] == keras_layer.recurrent_activation.__name__
-        assert hls_layer.get_input_variable().shape == list(input_shape)
-        assert hls_layer.get_output_variable().shape == model_output.shape.as_list()[1:]  # Ignore the batch size
     else:
-        assert hls_layer.class_name == 'Bidirectional' + rnn_layer.__name__
         assert hls_layer.attributes['merge_mode'] == keras_layer.merge_mode
-        if hls_layer.attributes['merge_mode'] == 'concat':
-            assert hls_layer.attributes['n_out'] == 2 * keras_layer.forward_layer.units
+        n_out = 0
+        for inner_layer, direction in [(keras_layer.forward_layer, 'forward'), (keras_layer.backward_layer, 'backward')]:
+            assert hls_layer.attributes[f'{direction}_n_states'] == inner_layer.units
+            if hls_layer.attributes['merge_mode'] == 'concat':
+                n_out += inner_layer.units
+            else:
+                n_out = inner_layer.units
+            assert hls_layer.attributes[f'{direction}_activation'] == inner_layer.activation.__name__
+            if f'{direction}_recurrent_activation' in hls_layer.attributes:  # SimpleRNN doesn't have this
+                assert hls_layer.attributes[f'{direction}_recurrent_activation'] == inner_layer.recurrent_activation.__name__
+        assert hls_layer.attributes['n_out'] == n_out
+
+
+def compare_weights(hls_weights, keras_weights, keras_layer):
+    def comparison(hls_weights, keras_weights, class_name):
+        assert hls_weights[0].data.shape == keras_weights[0].shape
+        assert hls_weights[1].data.shape == keras_weights[1].shape
+        if class_name == 'GRU':
+            # GRU has both bias and recurrent bias
+            assert hls_weights[2].data.shape == keras_weights[2][0].shape
+            assert hls_weights[3].data.shape == keras_weights[2][1].shape
         else:
-            assert hls_layer.attributes['n_out'] == keras_layer.forward_layer.units
-        assert hls_layer.attributes['activation'] == keras_layer.forward_layer.activation.__name__
-        if 'recurrent_activation' in hls_layer.attributes:  # SimpleRNN doesn't have this
-            assert hls_layer.attributes['recurrent_activation'] == keras_layer.forward_layer.recurrent_activation.__name__
-        assert hls_layer.get_input_variable().shape == list(input_shape)
-        assert hls_layer.get_output_variable().shape == model_output.shape.as_list()[1:]  # Ignore the batch size
+            # LSTM and SimpleRNN only have bias
+            assert hls_weights[2].data.shape == keras_weights[2].shape
+
+        np.testing.assert_array_equal(hls_weights[0].data, keras_weights[0])
+        np.testing.assert_array_equal(hls_weights[1].data, keras_weights[1])
+        if class_name == 'GRU':
+            np.testing.assert_array_equal(hls_weights[2].data, keras_weights[2][0])
+            np.testing.assert_array_equal(hls_weights[3].data, keras_weights[2][1])
+        else:
+            np.testing.assert_array_equal(hls_weights[2].data, keras_weights[2])
+
+    if keras_layer.__class__.__name__ != 'Bidirectional':
+        comparison(hls_weights, keras_weights, keras_layer.__class__.__name__)
+    else:
+        for i, inner_layer in enumerate([keras_layer.forward_layer, keras_layer.backward_layer]):
+            comparison(hls_weights[4 * i : 4 * (i + 1)], keras_weights[3 * i : 3 * (i + 1)], inner_layer.__class__.__name__)
+
+
+@pytest.mark.parametrize('rnn_layer', rnn_layers)
+@pytest.mark.parametrize('return_sequences', [True, False])
+def test_rnn_parsing(rnn_layer, return_sequences):
+
+    model = create_model_parsing(rnn_layer, return_sequences)
+
+    config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend='Vivado')
+    prj_name = f'hls4mlprj_rnn_{rnn_layer.__name__.lower()}_seq_{int(return_sequences)}'
+    output_dir = str(test_root_path / prj_name)
+    hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir)
+
+    hls_layer = list(hls_model.get_layers())[1]  # 0 is input, 1 is the RNN layer
+    keras_layer = model.layers[1]
+
+    # Basic sanity check, I/O, activations
+    compare_attributes(hls_layer, keras_layer)
 
     # Compare weights
     hls_weights = list(hls_layer.get_weights())  # [weights, recurrent_weights, bias, recurrent_bias]
-    rnn_weights = keras_layer.get_weights()  # [weights, recurrent_weights, bias]
-
-    assert hls_weights[0].data.shape == rnn_weights[0].shape
-    assert hls_weights[1].data.shape == rnn_weights[1].shape
-    if 'gru' in rnn_layer.__name__.lower():
-        # GRU has both bias and recurrent bias
-        assert hls_weights[2].data.shape == rnn_weights[2][0].shape
-        assert hls_weights[3].data.shape == rnn_weights[2][1].shape
-    else:
-        # LSTM and SimpleRNN only have bias
-        assert hls_weights[2].data.shape == rnn_weights[2].shape
-
-    np.testing.assert_array_equal(hls_weights[0].data, rnn_weights[0])
-    np.testing.assert_array_equal(hls_weights[1].data, rnn_weights[1])
-    if 'gru' in rnn_layer.__name__.lower():
-        np.testing.assert_array_equal(hls_weights[2].data, rnn_weights[2][0])
-        np.testing.assert_array_equal(hls_weights[3].data, rnn_weights[2][1])
+    keras_weights = keras_layer.get_weights()  # [weights, recurrent_weights, bias]
+    compare_weights(hls_weights, keras_weights, keras_layer)
+
+
+def create_model_accuracy(rnn_layer, return_sequences):
+    # Subtract 0.5 to include negative values
+    input_shape = (12, 8)
+    X = np.random.rand(50, *input_shape) - 0.5
+
+    layer_name = rnn_layer.__name__
+    model = Sequential()
+    model.add(Input(shape=input_shape))
+    if layer_name != 'Bidirectional':
+        test_layer = rnn_layer(
+            units=32,
+            input_shape=input_shape,
+            kernel_initializer='lecun_uniform',
+            recurrent_initializer='lecun_uniform',
+            bias_initializer='lecun_uniform',
+            return_sequences=return_sequences,
+            name=layer_name,
+        )
     else:
-        np.testing.assert_array_equal(hls_weights[2].data, rnn_weights[2])
+        test_layer = Bidirectional(
+            LSTM(
+                units=15,
+                input_shape=input_shape,
+                kernel_initializer='lecun_uniform',
+                recurrent_initializer='lecun_uniform',
+                bias_initializer='lecun_uniform',
+                return_sequences=return_sequences,
+            ),
+            backward_layer=GRU(
+                units=17,
+                input_shape=input_shape,
+                kernel_initializer='lecun_uniform',
+                recurrent_initializer='lecun_uniform',
+                bias_initializer='lecun_uniform',
+                return_sequences=return_sequences,
+                go_backwards=True,
+            ),
+            name=layer_name,
+        )
+    model.add(test_layer)
+    model.compile()
+    return model, X
 
 
 @pytest.mark.parametrize(
-    'rnn_layer, bidirectional, backend, io_type, strategy',
+    'rnn_layer, backend, io_type, strategy',
     [
-        (SimpleRNN, False, 'Quartus', 'io_parallel', 'resource'),
-        (SimpleRNN, False, 'oneAPI', 'io_parallel', 'resource'),
-        (LSTM, False, 'Vivado', 'io_parallel', 'resource'),
-        (LSTM, False, 'Vivado', 'io_parallel', 'latency'),
-        (LSTM, False, 'Vitis', 'io_parallel', 'resource'),
-        (LSTM, False, 'Vitis', 'io_parallel', 'latency'),
-        (LSTM, True, 'Vivado', 'io_parallel', 'resource'),
-        (LSTM, True, 'Vivado', 'io_parallel', 'latency'),
-        (LSTM, True, 'Vitis', 'io_parallel', 'resource'),
-        (LSTM, True, 'Vitis', 'io_parallel', 'latency'),
-        (LSTM, False, 'Quartus', 'io_parallel', 'resource'),
-        (LSTM, False, 'oneAPI', 'io_parallel', 'resource'),
-        (LSTM, False, 'Vivado', 'io_stream', 'resource'),
-        (LSTM, False, 'Vivado', 'io_stream', 'latency'),
-        (LSTM, False, 'Vitis', 'io_stream', 'resource'),
-        (LSTM, False, 'Vitis', 'io_stream', 'latency'),
-        (GRU, False, 'Vivado', 'io_parallel', 'resource'),
-        (GRU, False, 'Vivado', 'io_parallel', 'latency'),
-        (GRU, False, 'Vitis', 'io_parallel', 'resource'),
-        (GRU, False, 'Vitis', 'io_parallel', 'latency'),
-        (GRU, True, 'Vivado', 'io_parallel', 'resource'),
-        (GRU, True, 'Vivado', 'io_parallel', 'latency'),
-        (GRU, True, 'Vitis', 'io_parallel', 'resource'),
-        (GRU, True, 'Vitis', 'io_parallel', 'latency'),
-        (GRU, False, 'Quartus', 'io_parallel', 'resource'),
-        (GRU, False, 'oneAPI', 'io_parallel', 'resource'),
-        (GRU, False, 'Vivado', 'io_stream', 'resource'),
-        (GRU, False, 'Vivado', 'io_stream', 'latency'),
-        (GRU, False, 'Vitis', 'io_stream', 'resource'),
-        (GRU, False, 'Vitis', 'io_stream', 'latency'),
-        (GRU, False, 'Quartus', 'io_stream', 'resource'),
-        (GRU, False, 'oneAPI', 'io_stream', 'resource'),
+        (SimpleRNN, 'Quartus', 'io_parallel', 'resource'),
+        (SimpleRNN, 'oneAPI', 'io_parallel', 'resource'),
+        (LSTM, 'Vivado', 'io_parallel', 'resource'),
+        (LSTM, 'Vivado', 'io_parallel', 'latency'),
+        (LSTM, 'Vitis', 'io_parallel', 'resource'),
+        (LSTM, 'Vitis', 'io_parallel', 'latency'),
+        (LSTM, 'Quartus', 'io_parallel', 'resource'),
+        (LSTM, 'oneAPI', 'io_parallel', 'resource'),
+        (LSTM, 'Vivado', 'io_stream', 'resource'),
+        (LSTM, 'Vivado', 'io_stream', 'latency'),
+        (LSTM, 'Vitis', 'io_stream', 'resource'),
+        (LSTM, 'Vitis', 'io_stream', 'latency'),
+        (GRU, 'Vivado', 'io_parallel', 'resource'),
+        (GRU, 'Vivado', 'io_parallel', 'latency'),
+        (GRU, 'Vitis', 'io_parallel', 'resource'),
+        (GRU, 'Vitis', 'io_parallel', 'latency'),
+        (GRU, 'Quartus', 'io_parallel', 'resource'),
+        (GRU, 'oneAPI', 'io_parallel', 'resource'),
+        (GRU, 'Vivado', 'io_stream', 'resource'),
+        (GRU, 'Vivado', 'io_stream', 'latency'),
+        (GRU, 'Vitis', 'io_stream', 'resource'),
+        (GRU, 'Vitis', 'io_stream', 'latency'),
+        (GRU, 'Quartus', 'io_stream', 'resource'),
+        (GRU, 'oneAPI', 'io_stream', 'resource'),
+        (Bidirectional, 'Vivado', 'io_parallel', 'resource'),
+        (Bidirectional, 'Vivado', 'io_parallel', 'latency'),
+        (Bidirectional, 'Vitis', 'io_parallel', 'resource'),
+        (Bidirectional, 'Vitis', 'io_parallel', 'latency'),
     ],
 )
 @pytest.mark.parametrize('return_sequences', [True, False])
 @pytest.mark.parametrize('static', [True, False])
-def test_rnn_accuracy(rnn_layer, bidirectional, return_sequences, backend, io_type, strategy, static):
-    # Subtract 0.5 to include negative values
-    input_shape = (12, 8)
-    X = np.random.rand(50, *input_shape) - 0.5
-
-    layer_name = ("Bidirectional" if bidirectional else "") + rnn_layer.__name__
-    keras_model = Sequential()
-    keras_model.add(Input(shape=input_shape))
-    test_layer = rnn_layer(
-        units=32,
-        input_shape=input_shape,
-        kernel_initializer='lecun_uniform',
-        recurrent_initializer='lecun_uniform',
-        bias_initializer='lecun_uniform',
-        return_sequences=return_sequences,
-        name=layer_name,
-    )
-    if not bidirectional:
-        keras_model.add(test_layer)
-    else:
-        keras_model.add(Bidirectional(test_layer, name=layer_name))
+def test_rnn_accuracy(rnn_layer, return_sequences, backend, io_type, strategy, static):
+    layer_name = rnn_layer.__name__
 
-    keras_model.compile()
+    model, X = create_model_accuracy(rnn_layer, return_sequences)
 
     default_precision = 'ap_fixed<32, 16>' if backend in ['Vivado', 'Vitis'] else 'ac_fixed<32, 16, true>'
     hls_config = hls4ml.utils.config_from_keras_model(
-        keras_model, granularity='name', default_precision=default_precision, backend=backend
+        model, granularity='name', default_precision=default_precision, backend=backend
     )
     hls_config['LayerName'][layer_name]['static'] = static
     hls_config['LayerName'][layer_name]['Strategy'] = strategy
     prj_name = (
         'hls4mlprj_rnn_accuracy_'
-        + ('bidirectional_' if bidirectional else '')
         + f'{layer_name}_static_{int(static)}_ret_seq_{int(return_sequences)}_'
-        f'{backend}_{io_type}_{strategy}'
+        + f'{backend}_{io_type}_{strategy}'
     )
     output_dir = str(test_root_path / prj_name)
 
     hls_model = hls4ml.converters.convert_from_keras_model(
-        keras_model, hls_config=hls_config, output_dir=output_dir, backend=backend, io_type=io_type
+        model, hls_config=hls_config, output_dir=output_dir, backend=backend, io_type=io_type
     )
     hls_model.compile()
 
-    keras_prediction = keras_model.predict(X)
+    keras_prediction = model.predict(X)
     hls_prediction = hls_model.predict(X)
     np.testing.assert_allclose(hls_prediction.flatten(), keras_prediction.flatten(), rtol=0.0, atol=5e-2)
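
For reference, here is a minimal standalone sketch (not part of the commit) of the asymmetric Bidirectional configuration the new helpers exercise: an LSTM forward pass and a GRU backward pass with different unit counts, merged by concatenation. The layer sizes mirror create_model_parsing but are otherwise arbitrary; a TF2-era Keras environment is assumed.

    from tensorflow.keras.layers import GRU, LSTM, Bidirectional, Input
    from tensorflow.keras.models import Model

    model_input = Input(shape=(3, 8))  # (time_steps, input_size), as in create_model_parsing
    forward_layer = LSTM(37, return_sequences=False)
    # The backward layer may use a different cell type and width, but must set go_backwards=True
    backward_layer = GRU(27, return_sequences=False, go_backwards=True)
    model_output = Bidirectional(forward_layer, backward_layer=backward_layer)(model_input)
    model = Model(model_input, model_output)

    # With the default merge_mode='concat', the output width is the sum of the
    # two unit counts: 37 + 27 = 64
    print(model.output_shape)  # (None, 64)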

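A detail worth noting when reading compare_weights: Keras returns a Bidirectional layer's weights as the forward layer's arrays followed by the backward layer's, three per direction, which is what the keras_weights[3 * i : 3 * (i + 1)] slicing relies on; the hls4ml side exposes up to four tensors per direction (weights, recurrent weights, bias, recurrent bias), hence the 4 * i stride. An illustrative unpacking, assuming a hypothetical built wrapper named bidi_layer:

    # Illustrative only: forward triple first, then backward triple
    weights = bidi_layer.get_weights()
    fwd_kernel, fwd_recurrent, fwd_bias = weights[0:3]
    bwd_kernel, bwd_recurrent, bwd_bias = weights[3:6]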
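
Assuming a working hls4ml checkout with TensorFlow and pytest installed, the new cases can be selected with pytest's -k filter, since the parametrized class name appears in the generated test IDs:

    pytest test/pytest/test_rnn.py -k Bidirectional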