
Commit fc2c68a

fix most pytest issues
1 parent 7adfe5b commit fc2c68a

3 files changed: +9 additions, -9 deletions


hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_resource.h
Lines changed: 1 addition & 0 deletions

@@ -3,6 +3,7 @@
 
 #include "nnet_common.h"
 #include "nnet_dense.h"
+#include <cstdint>
 
 namespace nnet {
 

hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_resource.h
Lines changed: 1 addition & 0 deletions

@@ -4,6 +4,7 @@
 #include "nnet_common.h"
 #include "nnet_dense.h"
 #include "nnet_helpers.h"
+#include <cstdint>
 
 namespace nnet {
 

test/pytest/test_pytorch_api.py
Lines changed: 7 additions & 9 deletions

@@ -550,7 +550,7 @@ def test_pooling(pooling, padds, backend):
     # Verify correct parsing of layer
     hls_pool = list(hls_model.get_layers())[-2]
     if '2d' in pooling.__name__:
-        assert hls_pool.attributes['name'] == poolNode.name
+        assert hls_pool.attributes['name'] == "_" + poolNode.name.split("_")[-1]
         assert hls_pool.attributes['class_name'][-2] == str(2)
         assert hls_pool.attributes['stride_height'] == class_object_pool.stride
         assert hls_pool.attributes['stride_width'] == class_object_pool.stride
@@ -560,14 +560,14 @@ def test_pooling(pooling, padds, backend):
 
     elif '1d' in pooling.__name__:
         if "Max" in pooling.__name__:
-            assert hls_pool.attributes['name'] == poolNode.name
+            assert hls_pool.attributes['name'] == "_" + poolNode.name.split("_")[-1]
             assert hls_pool.attributes['class_name'][-2] == str(1)
             assert hls_pool.attributes['pool_width'] == class_object_pool.kernel_size
             assert hls_pool.attributes['stride_width'] == class_object_pool.stride
             assert hls_pool.attributes['padding'] == 'valid' if class_object_pool.padding == 0 else 'same'
 
         else:
-            assert hls_pool.attributes['name'] == poolNode.name
+            assert hls_pool.attributes['name'] == "_" + poolNode.name.split("_")[-1]
             assert hls_pool.attributes['class_name'][-2] == str(1)
             assert hls_pool.attributes['pool_width'] == class_object_pool.kernel_size[0]
             assert hls_pool.attributes['stride_width'] == class_object_pool.stride[0]
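Note on the changed assertions above: the expected layer name is now only the trailing, underscore-delimited token of the traced node name, prefixed with an underscore. A minimal sketch of that string logic, using a hypothetical node name (the real name comes from the torch.fx-traced poolNode inside the test):

node_name = "max_pool2d_1"  # hypothetical example, not taken from the test
expected = "_" + node_name.split("_")[-1]
assert expected == "_1"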
@@ -641,7 +641,7 @@ def test_squeeze(backend, io_type):
     pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy().flatten()
 
     config = config_from_pytorch_model(model)
-    del config['Model']['InputsChannelLast']  # We don't want anything touched for this test
+    del config['Model']['ChannelsLastConversion']  # We don't want anything touched for this test
     output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_squeeze_{backend}_{io_type}')
 
     hls_model = convert_from_pytorch_model(
@@ -719,7 +719,7 @@ def test_skipped_layers(backend, io_type):
     input_shape = (3, 8)
     batch_input_shape = (None,) + input_shape
     config = config_from_pytorch_model(
-        model, default_precision='ap_fixed<32,16>', inputs_channel_last=True, transpose_outputs=False
+        model, default_precision='ap_fixed<32,16>', channels_last_conversion="full", transpose_outputs=False
     )
     output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_skipped_{backend}_{io_type}')
     hls_model = convert_from_pytorch_model(
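The same rename runs through all the config calls in this file: the boolean inputs_channel_last flag is replaced by a channels_last_conversion mode string. A minimal usage sketch of the updated call, assuming an hls4ml version that includes this change; the toy model is a stand-in and not part of the test:

import torch
from hls4ml.utils import config_from_pytorch_model

model = torch.nn.Sequential(torch.nn.Conv1d(3, 4, kernel_size=3))  # stand-in model for illustration
config = config_from_pytorch_model(
    model,
    default_precision='ap_fixed<32,16>',
    channels_last_conversion="full",  # was inputs_channel_last=True
    transpose_outputs=False,
)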
@@ -734,10 +734,9 @@
     hls_model.compile()
 
     input = torch.randn(10, 3, 8)
-    hls_input = np.ascontiguousarray(torch.permute(input, (0, 2, 1)).detach().numpy())  # Transpose to channels_last
 
     pytorch_prediction = model(input).detach().numpy().flatten()
-    hls_prediction = hls_model.predict(hls_input).flatten()
+    hls_prediction = hls_model.predict(input.detach().numpy()).flatten()
 
     np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=5e-2)
 

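As the updated lines imply, with channels_last_conversion="full" the HLS model can be fed the PyTorch-ordered (channels-first) array directly, so the manual permute to channels_last is dropped. A sketch of the resulting prediction path, assuming model and hls_model exist as built and compiled earlier in this test (numpy and torch are already imported by the test file):

x = torch.randn(10, 3, 8)  # PyTorch-ordered (channels-first) input
pytorch_prediction = model(x).detach().numpy().flatten()
hls_prediction = hls_model.predict(x.detach().numpy()).flatten()  # no manual permute needed
np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=5e-2)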
@@ -785,8 +784,7 @@ def forward(self, x):
     config = config_from_pytorch_model(
         model,
         default_precision='ap_fixed<32,16>',
-        inputs_channel_last=False,  # Crucial for testing if the first Transpose was removed
-        transpose_outputs=False,
+        channels_last_conversion="full",  # Crucial for testing if the first Transpose was removed
     )
     output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_transpose_nop_{tensor_rank}d_{backend}_{io_type}')
     hls_model = convert_from_pytorch_model(
