Skip to content

Commit 58eda32

Browse files
committed
Move quantizer check to common function
1 parent b1f40cc commit 58eda32

File tree

1 file changed

+15
-53
lines changed

1 file changed

+15
-53
lines changed

hls4ml/converters/keras/qkeras.py

Lines changed: 15 additions & 53 deletions
Original file line number | Diff line number | Diff line change
@@ -9,7 +9,9 @@
99

1010

1111
def get_quantizer_from_config(keras_layer, quantizer_var):
12-
quantizer_config = keras_layer['config'][f'{quantizer_var}_quantizer']
12+
quantizer_config = keras_layer['config'].get(f'{quantizer_var}_quantizer', None)
13+
if quantizer_config is None:
14+
return None # No quantizer specified in the layer
1315
if keras_layer['class_name'] == 'QBatchNormalization':
1416
return QKerasQuantizer(quantizer_config)
1517
elif 'binary' in quantizer_config['class_name']:
@@ -24,15 +26,8 @@ def get_quantizer_from_config(keras_layer, quantizer_var):
2426
def parse_qdense_layer(keras_layer, input_names, input_shapes, data_reader):
2527
layer, output_shape = parse_dense_layer(keras_layer, input_names, input_shapes, data_reader)
2628

27-
if keras_layer['config']['kernel_quantizer'] is not None:
28-
layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
29-
else:
30-
layer['weight_quantizer'] = None
31-
32-
if keras_layer['config']['bias_quantizer'] is not None:
33-
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
34-
else:
35-
layer['bias_quantizer'] = None
29+
layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
30+
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
3631

3732
return layer, output_shape
3833

@@ -46,15 +41,8 @@ def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader):
4641
elif '2D' in keras_layer['class_name']:
4742
layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)
4843

49-
if keras_layer['config']['kernel_quantizer'] is not None:
50-
layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
51-
else:
52-
layer['weight_quantizer'] = None
53-
54-
if keras_layer['config']['bias_quantizer'] is not None:
55-
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
56-
else:
57-
layer['bias_quantizer'] = None
44+
layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
45+
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
5846

5947
return layer, output_shape
6048

@@ -63,14 +51,8 @@ def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader):
6351
def parse_qdepthwiseqconv_layer(keras_layer, input_names, input_shapes, data_reader):
6452
layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)
6553

66-
if keras_layer['config']['depthwise_quantizer'] is not None:
67-
layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
68-
else:
69-
layer['depthwise_quantizer'] = None
70-
if keras_layer['config']['bias_quantizer'] is not None:
71-
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
72-
else:
73-
layer['bias_quantizer'] = None
54+
layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
55+
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
7456

7557
return layer, output_shape
7658

@@ -84,19 +66,9 @@ def parse_qsepconv_layer(keras_layer, input_names, input_shapes, data_reader):
8466
elif '2D' in keras_layer['class_name']:
8567
layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)
8668

87-
if keras_layer['config']['depthwise_quantizer'] is not None:
88-
layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
89-
else:
90-
layer['depthwise_quantizer'] = None
91-
if keras_layer['config']['pointwise_quantizer'] is not None:
92-
layer['pointwise_quantizer'] = get_quantizer_from_config(keras_layer, 'pointwise')
93-
else:
94-
layer['pointwise_quantizer'] = None
95-
96-
if keras_layer['config']['bias_quantizer'] is not None:
97-
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
98-
else:
99-
layer['bias_quantizer'] = None
69+
layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
70+
layer['pointwise_quantizer'] = get_quantizer_from_config(keras_layer, 'pointwise')
71+
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
10072

10173
return layer, output_shape
10274

@@ -107,19 +79,9 @@ def parse_qrnn_layer(keras_layer, input_names, input_shapes, data_reader):
10779

10880
layer, output_shape = parse_rnn_layer(keras_layer, input_names, input_shapes, data_reader)
10981

110-
if keras_layer['config']['kernel_quantizer'] is not None:
111-
layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
112-
else:
113-
layer['weight_quantizer'] = None
114-
if keras_layer['config']['recurrent_quantizer'] is not None:
115-
layer['recurrent_quantizer'] = get_quantizer_from_config(keras_layer, 'recurrent')
116-
else:
117-
layer['recurrent_quantizer'] = None
118-
119-
if keras_layer['config']['bias_quantizer'] is not None:
120-
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
121-
else:
122-
layer['bias_quantizer'] = None
82+
layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
83+
layer['recurrent_quantizer'] = get_quantizer_from_config(keras_layer, 'recurrent')
84+
layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
12385

12486
return layer, output_shape
12587

0 commit comments

Comments (0)