Commit 583a8c2

In softmax, make the axis -1 if it's a positive index equivalent to the last dimension
1 parent accadaf commit 583a8c2

2 files changed: +83, -34 lines

hls4ml/converters/onnx/core.py

Lines changed: 3 additions & 0 deletions

@@ -62,6 +62,9 @@ def parse_activation_layer(node, input_names, input_shapes, graph):
     if layer['class_name'] == 'Softmax':
         layer['activation'] = 'softmax'
         layer['axis'] = get_onnx_attribute(node, 'axis', -1)
+        # because -1 is better supported than an explicit index, check if it's the same
+        if layer['axis'] == len(input_shapes[0]) - 1:
+            layer['axis'] = -1
 
     elif layer['class_name'] in ['ELU', 'LeakyReLU', 'ThresholdedReLU']:
         layer['activation'] = layer['class_name']
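
Purely for illustration (not part of the commit): a small numpy sketch of why the rewrite above is safe. A positive axis that points at the last dimension of the input computes the same softmax as axis=-1, which is exactly the case the parser now folds to -1. The softmax helper and the (1, 8, 10) shape below are made up for this example.

import numpy as np

def softmax(x, axis):
    # numerically stable softmax along the given axis
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)

x = np.random.rand(1, 8, 10).astype(np.float32)  # hypothetical rank-3 input tensor

# axis=2 is the last dimension of this rank-3 tensor (len(x.shape) - 1 == 2),
# so it is interchangeable with axis=-1
np.testing.assert_allclose(softmax(x, axis=2), softmax(x, axis=-1))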

test/pytest/test_qonnx.py

Lines changed: 80 additions & 34 deletions

@@ -16,18 +16,7 @@
 test_root_path = Path(__file__).parent
 example_model_path = (test_root_path / '../../example-models').resolve()
 
-
-@pytest.fixture(scope='module')
-def sep_conv_model():
-    """
-    Load separabale conv model, already channels-last and cleaned
-    """
-    dl_file = str(example_model_path / "onnx/separable_conv_model_ch_last.onnx")
-    assert os.path.isfile(dl_file)
-
-    model = ModelWrapper(dl_file)
-
-    return model
+# The models
 
 
 @pytest.fixture(scope='module')
@@ -97,31 +86,33 @@ def jettagging_model():
     return model
 
 
-@pytest.mark.parametrize('backend', ['Vitis'])
-def test_sep_conv(sep_conv_model, backend):
-    model = sep_conv_model
-    ishape = tuple(model.get_tensor_shape(model.graph.input[0].name))
-    X = np.random.uniform(low=0, high=1, size=np.prod(ishape)).reshape(ishape)
-    X = (np.round(X * 2**16) * 2**-16).astype(np.float32)
-    idict = {model.graph.input[0].name: X}
-    y_qonnx = oxe.execute_onnx(model, idict)[model.graph.output[0].name]
+@pytest.fixture(scope='module')
+def sep_conv_model():
+    """
+    Load separabale conv model, already channels-last and cleaned
+    """
+    dl_file = str(example_model_path / "onnx/separable_conv_model_ch_last.onnx")
+    assert os.path.isfile(dl_file)
 
-    config = hls4ml.utils.config.config_from_onnx_model(
-        model, granularity='name', backend=backend, default_precision='fixed<32,16>'
-    )
+    model = ModelWrapper(dl_file)
 
-    hls_model = hls4ml.converters.convert_from_onnx_model(
-        model,
-        output_dir=str(test_root_path / f'hls4mlprj_qonnx_sep_conv_{backend}'),
-        io_type='io_stream',
-        backend=backend,
-        hls_config=config,
-    )
-    hls_model.compile()
-    y_hls4ml = hls_model.predict(np.ascontiguousarray(X))
+    return model
+
+
+@pytest.fixture(scope='module')
+def three_layer_keras_model():
+    """
+    Load a simple, originally keras unquantized model
+    """
+    dl_file = str(example_model_path / "onnx/three_layer_keras.onnx")
+    assert os.path.isfile(dl_file)
+
+    model = ModelWrapper(dl_file)
+    model = qonnx.util.cleanup.cleanup_model(model)
+    return model
 
-    np.testing.assert_allclose(y_qonnx.ravel(), y_hls4ml.ravel(), atol=1e-2, rtol=1)
-    print('test')
+
+# The actual tests
 
 
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
@@ -197,3 +188,58 @@ def test_jet_tagging(jettagging_model, backend):
     y_hls4ml = hls_model.predict(X)
 
     np.testing.assert_allclose(y_qonnx.ravel(), y_hls4ml.ravel(), atol=1e-2, rtol=1)
+
+
+@pytest.mark.parametrize('backend', ['Vitis'])
+def test_sep_conv(sep_conv_model, backend):
+    model = sep_conv_model
+    ishape = tuple(model.get_tensor_shape(model.graph.input[0].name))
+    X = np.random.uniform(low=0, high=1, size=np.prod(ishape)).reshape(ishape)
+    X = (np.round(X * 2**16) * 2**-16).astype(np.float32)
+    idict = {model.graph.input[0].name: X}
+    y_qonnx = oxe.execute_onnx(model, idict)[model.graph.output[0].name]
+
+    config = hls4ml.utils.config.config_from_onnx_model(
+        model, granularity='name', backend=backend, default_precision='fixed<32,16>'
+    )
+
+    hls_model = hls4ml.converters.convert_from_onnx_model(
+        model,
+        output_dir=str(test_root_path / f'hls4mlprj_qonnx_sep_conv_{backend}'),
+        io_type='io_stream',
+        backend=backend,
+        hls_config=config,
+    )
+    hls_model.compile()
+    y_hls4ml = hls_model.predict(np.ascontiguousarray(X))
+
+    np.testing.assert_allclose(y_qonnx.ravel(), y_hls4ml.ravel(), atol=1e-2, rtol=1)
+
+
+@pytest.mark.parametrize('backend', ['Vitis'])
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+def test_three_layer_keras(three_layer_keras_model, io_type, backend):
+    model = three_layer_keras_model
+    ishape = tuple(model.get_tensor_shape(model.graph.input[0].name))
+    X = np.random.uniform(low=0, high=1, size=np.prod(ishape)).reshape(ishape)
+    X = (np.round(X * 2**16) * 2**-16).astype(np.float32)
+    idict = {model.graph.input[0].name: X}
+    y_qonnx = oxe.execute_onnx(model, idict)[model.graph.output[0].name]
+
+    config = hls4ml.utils.config.config_from_onnx_model(
+        model, granularity='name', backend=backend, default_precision='fixed<32,16>'
+    )
+
+    config['LayerName']['Softmax_0']['Implementation'] = 'legacy'
+
+    hls_model = hls4ml.converters.convert_from_onnx_model(
+        model,
+        output_dir=str(test_root_path / f'hls4mlprj_onnx_three_layer_keras_{io_type}_{backend}'),
+        io_type=io_type,
+        backend=backend,
+        hls_config=config,
+    )
+    hls_model.compile()
+    y_hls4ml = hls_model.predict(X)
+
+    np.testing.assert_allclose(y_qonnx.ravel(), y_hls4ml.ravel(), atol=1e-2, rtol=1)
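
As a usage note (not part of the commit itself), the tests touched by this diff can be run on their own. This is a minimal sketch assuming pytest and the QONNX test dependencies are installed and that it is executed from the hls4ml repository root:

import pytest

# run only the two tests involved in this change, verbosely
pytest.main([
    "test/pytest/test_qonnx.py",
    "-k", "test_sep_conv or test_three_layer_keras",
    "-v",
])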
