Skip to content

Commit 7255cbc

Browse files
authored
Merge pull request #997 from fastmachinelearning/weight_quantizer_none
add protection for when kernel_quantizer is None
2 parents 02688f1 + 03ea15d commit 7255cbc

File tree

2 files changed

+40
-24
lines changed

2 files changed

+40
-24
lines changed

hls4ml/converters/keras/qkeras.py

Lines changed: 8 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,9 @@
 
 
 def get_quantizer_from_config(keras_layer, quantizer_var):
-    quantizer_config = keras_layer['config'][f'{quantizer_var}_quantizer']
+    quantizer_config = keras_layer['config'].get(f'{quantizer_var}_quantizer', None)
+    if quantizer_config is None:
+        return None  # No quantizer specified in the layer
     if keras_layer['class_name'] == 'QBatchNormalization':
         return QKerasQuantizer(quantizer_config)
     elif 'binary' in quantizer_config['class_name']:
@@ -25,10 +27,7 @@ def parse_qdense_layer(keras_layer, input_names, input_shapes, data_reader):
     layer, output_shape = parse_dense_layer(keras_layer, input_names, input_shapes, data_reader)
 
     layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
 
     return layer, output_shape

@@ -43,10 +42,7 @@ def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader):
     layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)
 
     layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
 
     return layer, output_shape

@@ -56,11 +52,7 @@ def parse_qdepthwiseqconv_layer(keras_layer, input_names, input_shapes, data_rea
     layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)
 
     layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
-
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
 
     return layer, output_shape

@@ -76,11 +68,7 @@ def parse_qsepconv_layer(keras_layer, input_names, input_shapes, data_reader):
 
     layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
     layer['pointwise_quantizer'] = get_quantizer_from_config(keras_layer, 'pointwise')
-
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
 
     return layer, output_shape

@@ -93,11 +81,7 @@ def parse_qrnn_layer(keras_layer, input_names, input_shapes, data_reader):
 
     layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
     layer['recurrent_quantizer'] = get_quantizer_from_config(keras_layer, 'recurrent')
-
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
 
     return layer, output_shape

test/pytest/test_qkeras.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -399,6 +399,38 @@ def test_qactivation_kwarg(randX_100_10, activation_quantizer, weight_quantizer)
     assert sum(wrong) / len(wrong) <= 0.005
 
 
+@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
+@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
+def test_quantizer_parsing(randX_100_10, backend, io_type):
+    X = randX_100_10
+    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6>
+    model = Sequential()
+    model.add(
+        QDense(
+            8,
+            input_shape=(10,),
+            kernel_quantizer=None,  # Incorrect usage, but shouldn't break hls4ml
+            kernel_initializer='ones',
+            bias_quantizer=None,
+            bias_initializer='zeros',
+            activation='quantized_relu(8, 0)',
+        )
+    )
+    model.compile()
+
+    config = hls4ml.utils.config_from_keras_model(model, granularity='name', default_precision='fixed<24,8>')
+    output_dir = str(test_root_path / f'hls4mlprj_qkeras_quant_parse_{backend}_{io_type}')
+    hls_model = hls4ml.converters.convert_from_keras_model(
+        model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
+    )
+    hls_model.compile()
+
+    y_qkeras = model.predict(X)
+    y_hls4ml = hls_model.predict(X)
+
+    np.testing.assert_array_equal(y_qkeras, y_hls4ml.reshape(y_qkeras.shape))
+
+
 @pytest.fixture(scope='module')
 def randX_100_8_8_1():
     return np.random.rand(100, 8, 8, 1)

0 commit comments

Comments
 (0)