@@ -399,6 +399,38 @@ def test_qactivation_kwarg(randX_100_10, activation_quantizer, weight_quantizer)
     assert sum(wrong) / len(wrong) <= 0.005


+@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+def test_quantizer_parsing(randX_100_10, backend, io_type):
+    X = randX_100_10
+    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6>
+    model = Sequential()
+    model.add(
+        QDense(
+            8,
+            input_shape=(10,),
+            kernel_quantizer=None,  # Incorrect usage, but shouldn't break hls4ml
+            kernel_initializer='ones',
+            bias_quantizer=None,
+            bias_initializer='zeros',
+            activation='quantized_relu(8, 0)',
+        )
+    )
+    model.compile()
+
+    config = hls4ml.utils.config_from_keras_model(model, granularity='name', default_precision='fixed<24,8>')
+    output_dir = str(test_root_path / f'hls4mlprj_qkeras_quant_parse_{backend}_{io_type}')
+    hls_model = hls4ml.converters.convert_from_keras_model(
+        model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
+    )
+    hls_model.compile()
+
+    y_qkeras = model.predict(X)
+    y_hls4ml = hls_model.predict(X)
+
+    np.testing.assert_array_equal(y_qkeras, y_hls4ml.reshape(y_qkeras.shape))
+
+
 @pytest.fixture(scope='module')
 def randX_100_8_8_1():
     return np.random.rand(100, 8, 8, 1)
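Usage note (not part of the diff): the added test can be selected by name with pytest. A minimal sketch, assuming the hunk applies to test/pytest/test_qkeras.py in the hls4ml repository (the file path is an assumption, not stated in the diff) and that hls4ml, QKeras, and a C++ compiler for hls_model.compile() are available locally:

# Minimal local invocation sketch; the test file path is assumed, not taken from the diff.
import pytest

# Run only the new quantizer-parsing test, across all backend/io_type parametrizations.
pytest.main(['test/pytest/test_qkeras.py', '-k', 'test_quantizer_parsing', '-v'])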