 atol = 5e-3


-@pytest.fixture(scope='module')
+@pytest.fixture
 def data():
     np.random.seed(0)
     X = np.random.rand(100, in_shape)
     return X


+@pytest.fixture(scope='module')
+def fusion_data():
+    n_batch = 2
+    n_in = 2
+    size_in_height = 32
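+    # random channels-first input of shape (batch, channels, length), as nn.Conv1d expects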
+    X = np.random.rand(n_batch, n_in, size_in_height)
+    return X
+
+
 @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
 @pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'Catapult'])
 def test_batchnorm(data, backend, io_type):
@@ -41,3 +50,78 @@ def test_batchnorm(data, backend, io_type):
     pytorch_prediction = model(torch.Tensor(data)).detach().numpy()
     hls_prediction = hls_model.predict(data)
     np.testing.assert_allclose(pytorch_prediction, hls_prediction, rtol=0, atol=atol, verbose=True)
+
+
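+# Looser tolerance for the fused conv + batchnorm test (5e-2 vs 5e-3 above)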
+atol = 5e-2
+
+
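+# Conv1d -> BatchNorm1d -> ReLU block, used to exercise batchnorm fusion in hls4ml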
+class BatchNorm_w_Fusion(nn.Module):
+    def __init__(self, filters, momentum):
+        super().__init__()
+        self.conv1 = nn.Conv1d(
+            int(filters),
+            filters,
+            kernel_size=3,
+            stride=1,
+            padding=1,
+            bias=False,
+        )
+        self.bn1 = nn.BatchNorm1d(filters, momentum=momentum)
+        self.relu1 = nn.ReLU()
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu1(x)
+        return x
+
+
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus', 'Catapult'])
+def test_batchnorm_fusion(fusion_data, backend, io_type):
+    n_in = 2
+    momentum = 0.99
+    size_in_height = 32
+    filters = n_in
+
+    # see above for model definition
+    model = BatchNorm_w_Fusion(filters, momentum)
+    # Important: eval mode makes batchnorm use its running statistics
+    model.eval()
+    # compute the PyTorch reference prediction
+    pytorch_prediction = model(torch.Tensor(fusion_data)).detach().numpy()
+
+    # We do not have an implementation of a transpose for io_stream, so inputs and
+    # outputs are transposed outside of hls4ml
+    if io_type == 'io_stream':
+        fusion_data = np.ascontiguousarray(fusion_data.transpose(0, 2, 1))
+        config = hls4ml.utils.config_from_pytorch_model(model, channels_last_conversion='internal', transpose_outputs=False)
+    else:
+        config = hls4ml.utils.config_from_pytorch_model(model, channels_last_conversion='full', transpose_outputs=True)
+
+    config['Model']['Strategy'] = 'Resource'
+
+    # conversion
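+    # the input shape is channels-first, (None, channels, length); None leaves the batch size unspecified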
+    output_dir = str(test_root_path / f'hls4mlprj_block_{backend}_{io_type}')
+    hls_model = hls4ml.converters.convert_from_pytorch_model(
+        model,
+        (None, n_in, size_in_height),
+        hls_config=config,
+        output_dir=output_dir,
+        backend=backend,
+        io_type=io_type,
+    )
+
+    # compiling model
+    hls_model.compile()
+
+    if io_type == 'io_stream':
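+        # reshape the flat hls4ml output to channels-last, then transpose back to
+        # channels-first for comparison with the PyTorch prediction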
+        hls_prediction = np.transpose(
+            np.reshape(
+                hls_model.predict(fusion_data),
+                (pytorch_prediction.shape[0], pytorch_prediction.shape[2], pytorch_prediction.shape[1]),
+            ),
+            (0, 2, 1),
+        )
+    else:
+        hls_prediction = np.reshape(hls_model.predict(fusion_data), pytorch_prediction.shape)
+    np.testing.assert_allclose(pytorch_prediction, hls_prediction, rtol=0, atol=atol, verbose=True)