@@ -30,18 +30,18 @@
 
 import numpy as np
 import onnx
-import onnx.helper as oh
 import onnx.numpy_helper as nph
-from onnx import TensorProto
+import onnx.parser as oprs
 from onnx.checker import check_model
 from pkgutil import get_data
 
 import qonnx.core.onnx_exec as oxe
 from qonnx.core.datatype import DataType
 from qonnx.core.modelwrapper import ModelWrapper
+from qonnx.custom_op.general.quant import quant
 from qonnx.transformation.infer_shapes import InferShapes
 from qonnx.transformation.resize_conv_to_deconv import ResizeConvolutionToDeconvolution
-from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model
+from qonnx.util.basic import gen_finn_dt_tensor
 
 np.random.seed(0)
 
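The import changes above replace `onnx.helper`-based graph construction with ONNX's textual syntax via `onnx.parser` (imported as `oprs`). A minimal, self-contained sketch of that API, in the same style the new helpers below assemble with f-strings (this example is illustrative, not part of the diff):

```python
import onnx
import onnx.parser as oprs

# parse_model turns the textual ONNX form into a ModelProto;
# check_model then validates it against the declared opset.
model_text = """
<
    ir_version: 7,
    opset_import: ["" : 13]
>
agraph (float[1,3,4,4] in0) => (float[1,3,4,4] out0)
{
    out0 = Identity(in0)
}
"""
model = oprs.parse_model(model_text)
onnx.checker.check_model(model)
```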
@@ -98,75 +98,244 @@ def test_resize_conv_to_deconv_quant_model(maintain_bit_width: bool):
     ).all(), "Error: expected output does not match the produced output."
 
 
-def create_nn_resize_conv_model(
-    in_channels: int, out_channels: int, input_dim: int, kernel_size: int, upscale_factor: int, bias: bool
-):
-    assert isinstance(kernel_size, int), "Assuming square kernels, so kernel_size needs to be an int."
-    padding = (kernel_size - 1) // 2
-
-    ifm_ch = in_channels
-    ifm_dim = input_dim
-    ofm_dim = ifm_dim * upscale_factor
-    ofm_ch = out_channels
-    scales = np.array([1.0, 1.0, upscale_factor, upscale_factor], dtype=np.float32)
-
-    resize = oh.make_node(
-        "Resize",
-        inputs=["inp", "roi", "scales"],
-        outputs=["hid"],
-        mode="nearest",
-    )
-    conv = oh.make_node(
-        op_type="Conv",
-        inputs=["hid", "W"] if not bias else ["hid", "W", "B"],
-        outputs=["out"],
-        kernel_shape=[kernel_size, kernel_size],
-        pads=[padding, padding, padding, padding],
-        strides=[1, 1],
-        group=1,
-        dilations=[1, 1],
-    )
-
-    input_shape = [1, ifm_ch, ifm_dim, ifm_dim]
-    output_shape = [1, ofm_ch, ofm_dim, ofm_dim]
-
-    conv_param_shape = [ofm_ch, ifm_ch, kernel_size, kernel_size]
-    bias_param_shape = [ofm_ch]
-
-    inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, input_shape)
-    out = oh.make_tensor_value_info("out", TensorProto.FLOAT, output_shape)
-
-    W_conv = oh.make_tensor_value_info("W", TensorProto.FLOAT, conv_param_shape)
-    B_conv = oh.make_tensor_value_info("B", TensorProto.FLOAT, bias_param_shape)
-
-    value_info = [W_conv] if not bias else [W_conv, B_conv]
-
-    graph = oh.make_graph(
-        nodes=[resize, conv],
-        name="cnv_graph",
-        inputs=[inp],
-        outputs=[out],
-        value_info=value_info,
-    )
-    modelproto = qonnx_make_model(graph, producer_name="test_model")
-    model = ModelWrapper(modelproto)
+def float_nn_resize_model(r: int, ifm: int, ich: int, och: int, ksize: int, use_bias: bool):
+    assert isinstance(ksize, int), "Assuming square kernels, so kernel_size needs to be an int."
+    pad = (ksize - 1) // 2
+
+    ishp = (1, ich, ifm, ifm)
+    oshp = (1, och, ifm * r, ifm * r)
+    wshp = (och, ich, ksize, ksize)
+    bshp = (och,)
+    rscales = np.array([1.0, 1.0, r, r], dtype=np.float32)
+    weight = np.random.randn(*wshp)
+    bias = np.random.randn(*bshp)
+    ishp_str = str(list(ishp))
+    oshp_str = str(list(oshp))
+    wshp_str = str(list(wshp))
+    bshp_str = str(list(bshp))
+
+    if use_bias:
+        params_str = f"""
+        <
+            float{wshp_str} conv_param,
+            float{bshp_str} bias_param,
+            float roi,
+            float scales
+        >
+        """
+    else:
+        params_str = f"""
+        <
+            float{wshp_str} conv_param,
+            float roi,
+            float scales
+        >
+        """
+
+    if use_bias:
+        conv_str = f"""
+        out0 = Conv<
+            dilations=[1,1],
+            group=1,
+            kernel_shape=[{ksize},{ksize}],
+            strides=[1,1],
+            pads=[{pad},{pad},{pad},{pad}]
+        >(hid0, conv_param, bias_param)
+        """
+    else:
+        conv_str = f"""
+        out0 = Conv<
+            dilations=[1,1],
+            group=1,
+            kernel_shape=[{ksize},{ksize}],
+            strides=[1,1],
+            pads=[{pad},{pad},{pad},{pad}]
+        >(hid0, conv_param)
+        """
+
+    input = f"""
+    <
+        ir_version: 7,
+        opset_import: ["" : 13]
+    >
+    agraph (float{ishp_str} in0) => (float{oshp_str} out0)
+    {params_str}
+    {{
+        hid0 = Resize<
+            mode="nearest"
+        >(in0, roi, scales)
+        {conv_str}
+    }}
+    """
+
+    model = oprs.parse_model(input)
+    model = ModelWrapper(model)
+    model.set_initializer("roi", np.empty(0))
+    model.set_initializer("scales", rscales.astype(np.float32))
+    model.set_initializer("conv_param", weight.astype(np.float32))
+    if use_bias:
+        model.set_initializer("bias_param", bias.astype(np.float32))
+    model = model.transform(InferShapes())
+    check_model(model._model_proto)
+    return model
+
+
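Both new helpers drive the Resize node with `scales = [1, 1, r, r]` and `mode="nearest"`, and set `roi` to an empty tensor since nearest mode ignores it. For the integer upscale factors used in these tests, the resize is equivalent to repeating each pixel `r` times along the spatial axes; a small numpy illustration (not code from the diff):

```python
import numpy as np

# Nearest-neighbor upsampling by an integer factor r repeats each pixel r
# times along H and W -- the same result as Resize(scales=[1,1,r,r], mode="nearest").
x = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)
r = 2
up = x.repeat(r, axis=2).repeat(r, axis=3)
print(up.shape)  # (1, 1, 4, 4)
```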
+def quant_nn_resize_model(r: int, ifm: int, ich: int, och: int, ksize: int, use_bias: bool, channelwise: bool):
+    assert isinstance(ksize, int), "Assuming square kernels, so kernel_size needs to be an int."
+    pad = (ksize - 1) // 2
+
+    ishp = (1, ich, ifm, ifm)
+    oshp = (1, och, ifm * r, ifm * r)
+    wshp = (och, ich, ksize, ksize)
+    bshp = (och,)
+    rscales = np.array([1.0, 1.0, r, r], dtype=np.float32)
+    weight = np.random.randn(*wshp)
+    bias = np.random.randn(*bshp)
+    ishp_str = str(list(ishp))
+    oshp_str = str(list(oshp))
+    wshp_str = str(list(wshp))
+    bshp_str = str(list(bshp))
+
+    if channelwise:
+        q_attr_shp = (och, 1, 1, 1)
+    else:
+        q_attr_shp = (1,)
+    attrshp_str = str(list(q_attr_shp))
+    scale = np.random.rand(*q_attr_shp).astype(np.float32)
+    zeropt = np.zeros(q_attr_shp).astype(np.float32)  # NOTE: needs to be integer
+    bitwidth = np.array(4.0)
+
+    weight: np.ndarray = quant(weight, scale, zeropt, bitwidth, signed=True, narrow=True, rounding_mode="ROUND")
+
+    if use_bias:
+        params_str = f"""
+        <
+            float{wshp_str} conv_param,
+            float{attrshp_str} scale_param,
+            float{attrshp_str} zeropt_param,
+            float{bshp_str} bias_param,
+            float bitwidth_param,
+            float scale_bias,
+            float zeropt_bias,
+            float bitwidth_bias,
+            float roi,
+            float scales
+        >
+        """
+    else:
+        params_str = f"""
+        <
+            float{wshp_str} conv_param,
+            float{attrshp_str} scale_param,
+            float{attrshp_str} zeropt_param,
+            float roi,
+            float scales,
+            float bitwidth_param
+        >
+        """
+
+    if use_bias:
+        scale_bias = np.random.rand(
+            1,
+        )
+        zeropt_bias = np.array(0.0)
+        bitwidth_bias = np.array(16.0)
+        convs_str = f"""
+        param1 = qonnx.custom_op.general.Quant<
+            signed=1,
+            narrow=1,
+            rounding_mode="ROUND"
+        >(bias_param, scale_bias, zeropt_bias, bitwidth_bias)
+        out0 = Conv<
+            dilations=[1,1],
+            group=1,
+            kernel_shape=[{ksize},{ksize}],
+            strides=[1,1],
+            pads=[{pad},{pad},{pad},{pad}]
+        >(hid0, param0, param1)
+        """
+    else:
+        convs_str = f"""
+        out0 = Conv<
+            dilations=[1,1],
+            group=1,
+            kernel_shape=[{ksize},{ksize}],
+            strides=[1,1],
+            pads=[{pad},{pad},{pad},{pad}]
+        >(hid0, param0)
+        """
+
+    input = f"""
+    <
+        ir_version: 7,
+        opset_import: ["" : 13, "qonnx.custom_op.general" : 1]
+    >
+    agraph (float{ishp_str} in0) => (float{oshp_str} out0)
+    {params_str}
+    {{
+        hid0 = Resize<
+            mode="nearest"
+        >(in0, roi, scales)
+        param0 = qonnx.custom_op.general.Quant<
+            signed=1,
+            narrow=1,
+            rounding_mode="ROUND"
+        >(conv_param, scale_param, zeropt_param, bitwidth_param)
+        {convs_str}
+    }}
+    """
+    model = oprs.parse_model(input)
+    model = ModelWrapper(model)
     model.set_initializer("roi", np.empty(0))
-    model.set_initializer("scales", scales)
-    model.set_initializer("W", np.random.rand(*conv_param_shape).astype(np.float32))
-    if bias:
-        model.set_initializer("B", np.random.rand(*bias_param_shape).astype(np.float32))
+    model.set_initializer("scales", rscales.astype(np.float32))
+    model.set_initializer("conv_param", weight.astype(np.float32))
+    if use_bias:
+        model.set_initializer("bias_param", bias.astype(np.float32))
+        model.set_initializer("scale_bias", scale_bias.astype(np.float32))
+        model.set_initializer("zeropt_bias", zeropt_bias.astype(np.float32))
+        model.set_initializer("bitwidth_bias", bitwidth_bias.astype(np.float32))
+    model.set_initializer("scale_param", scale.astype(np.float32))
+    model.set_initializer("zeropt_param", zeropt.astype(np.float32))
+    model.set_initializer("bitwidth_param", bitwidth.astype(np.float32))
     model = model.transform(InferShapes())
     check_model(model._model_proto)
     return model
 
 
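`quant_nn_resize_model` pre-quantizes the weight initializer with the same `quant` function that implements the graph's `Quant` nodes, so the stored weights already lie on the 4-bit grid. A rough numpy sketch of the signed, narrow-range, `ROUND` behaviour relied on here (an approximation for illustration, not the exact QONNX kernel):

```python
import numpy as np

def fake_quant_sketch(x, scale, zeropt, bitwidth):
    # Project onto the integer grid, clip to the narrow signed range
    # [-(2**(b-1)) + 1, 2**(b-1) - 1] (e.g. [-7, 7] for 4 bits), dequantize.
    n = 2 ** (int(bitwidth) - 1)
    q = np.clip(np.round(x / scale + zeropt), -n + 1, n - 1)
    return (q - zeropt) * scale
```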
-@pytest.mark.parametrize("kernel_size", [1, 3, 5, 7])
+@pytest.mark.parametrize("kernel_size", [3, 5, 7])
 @pytest.mark.parametrize("upscale_factor", [1, 2, 3, 4])
 @pytest.mark.parametrize("bias", [True, False])
-def test_resize_conv_to_deconv_layer(kernel_size: int, upscale_factor: int, bias: bool):
+def test_float_resize_conv_to_deconv_layer(kernel_size: int, upscale_factor: int, bias: bool):
+    och = 10  # output channels
+    ich = 3  # input channels
+    ifm = 4  # input feature map size
+    input_shape = [1, ich, ifm, ifm]
-    # Create resize convolution layer that upsamples a 4x4 image with 1 I/O channel
+    # Create a resize convolution layer that upsamples a 4x4 image with 3 input and 10 output channels
-    model_1 = create_nn_resize_conv_model(3, 10, 4, kernel_size, upscale_factor, bias)
+    model_1 = float_nn_resize_model(upscale_factor, ifm, ich, och, kernel_size, bias)
     model_2 = model_1.transform(ResizeConvolutionToDeconvolution())
-    input_shape = [1, 3, 4, 4]
-    inp_dict = {"inp": np.random.rand(*input_shape).astype(np.float32)}
+    inp_dict = {"in0": np.random.rand(*input_shape).astype(np.float32)}
     assert oxe.compare_execution(model_1, model_2, inp_dict)
+
+
+@pytest.mark.parametrize("kernel_size", [3, 5, 7])
+@pytest.mark.parametrize("upscale_factor", [1, 2, 3, 4])
+@pytest.mark.parametrize("bias", [True, False])
+@pytest.mark.parametrize("channelwise", [True, False])
+@pytest.mark.parametrize("maintain_bit_width", [True, False])
+def test_quant_resize_conv_to_deconv_layer(
+    kernel_size: int, upscale_factor: int, bias: bool, channelwise: bool, maintain_bit_width: bool
+):
+    och = 10  # output channels
+    ich = 3  # input channels
+    ifm = 4  # input feature map size
+    input_shape = [1, ich, ifm, ifm]
+    # Create a resize convolution layer that upsamples a 4x4 image with 3 input and 10 output channels
+    model_1 = quant_nn_resize_model(upscale_factor, ifm, ich, och, kernel_size, bias, channelwise)
+    model_2 = model_1.transform(ResizeConvolutionToDeconvolution(maintain_bit_width=maintain_bit_width))
+    inp_dict = {"in0": np.random.rand(*input_shape).astype(np.float32)}
+    assert oxe.compare_execution(model_1, model_2, inp_dict)
+
+    if maintain_bit_width:
+        bw1 = model_1.get_initializer("bitwidth_param")
+        bw2 = model_2.get_initializer("bitwidth_param")
+        assert (bw1 == bw2).all()
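To see the effect of the rewrite both tests exercise, the node list can be inspected before and after the transform. A hypothetical inspection sketch using the helpers from this diff (the printed op lists are what one would expect, not captured output):

```python
m1 = float_nn_resize_model(2, 4, 3, 10, 3, False)
m2 = m1.transform(ResizeConvolutionToDeconvolution())
print([n.op_type for n in m1.graph.node])  # expected: ['Resize', 'Conv']
print([n.op_type for n in m2.graph.node])  # expected: a single ['ConvTranspose']
```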