Commit dfcf034

Merge branch 'main' into sep_to_dw_point
2 parents: ad39b8a + 4f4b164

15 files changed: +162 -233 lines

.pre-commit-config.yaml (2 additions, 2 deletions)

@@ -30,7 +30,7 @@ repos:
       args: ["--profile", "black", --line-length=125]

 - repo: https://github.com/asottile/pyupgrade
-  rev: v3.15.2
+  rev: v3.16.0
   hooks:
   - id: pyupgrade
     args: ["--py36-plus"]

@@ -41,7 +41,7 @@ repos:
   - id: setup-cfg-fmt

 - repo: https://github.com/pycqa/flake8
-  rev: 7.0.0
+  rev: 7.1.0
   hooks:
   - id: flake8
     exclude: docs/conf.py

Jenkinsfile (4 additions, 3 deletions)

@@ -1,7 +1,7 @@
 pipeline {
     agent {
         docker {
-            image 'vivado-el7:3'
+            image 'vivado-alma9:1'
             args '-v /data/Xilinx:/data/Xilinx'
         }
     }

@@ -14,8 +14,9 @@ pipeline {
         steps {
             dir(path: 'test') {
                 sh '''#!/bin/bash --login
-                conda activate hls4ml-py38
-                pip install tensorflow pyparsing
+                conda activate hls4ml-py310
+                conda install -y jupyterhub pydot graphviz pytest pytest-cov
+                pip install pytest-randomly jupyter onnx>=1.4.0 matplotlib pandas seaborn pydigitalwavetools==1.1 pyyaml tensorflow==2.14 qonnx torch git+https://github.com/google/qkeras.git pyparsing
                 pip install -U ../ --user
                 ./convert-keras-models.sh -x -f keras-models.txt
                 pip uninstall hls4ml -y'''

hls4ml/backends/quartus/quartus_backend.py (2 additions, 2 deletions)

@@ -274,7 +274,7 @@ def init_conv1d(self, layer):
         # - combination - at compile-time, the decision between Winograd and im2col is made
         # - im2col - specifically use im2col
         # - Winograd - use Winograd, if possible
-        layer.set_attr('implementation', layer.model.config.get_layer_config_value(layer, 'Implementation', 'combination'))
+        layer.set_attr('implementation', layer.model.config.get_layer_config_value(layer, 'Implementation', 'im2col'))

         layer.set_attr(
             'n_partitions', 1

@@ -305,7 +305,7 @@ def init_conv2d(self, layer):
         # - combination - at compile-time, the decision between Winograd and im2col is made
         # - im2col - specifically use im2col
         # - Winograd - use Winograd, if possible
-        layer.set_attr('implementation', layer.model.config.get_layer_config_value(layer, 'Implementation', 'combination'))
+        layer.set_attr('implementation', layer.model.config.get_layer_config_value(layer, 'Implementation', 'im2col'))

         layer.set_attr(
             'n_partitions', 1
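
Note: the default Conv1D/Conv2D implementation on the Quartus backend changes from 'combination' to 'im2col', so Winograd now has to be requested explicitly per layer. A minimal sketch of opting back in, assuming a name-granularity config and a toy model whose conv layer is named 'conv1' (the model and names are hypothetical, not part of this commit):

    import hls4ml
    from tensorflow.keras.layers import Conv2D
    from tensorflow.keras.models import Sequential

    # Toy model with an explicitly named conv layer (hypothetical example).
    model = Sequential([Conv2D(4, (3, 3), input_shape=(8, 8, 1), name='conv1')])

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    # 'Implementation' is the per-layer key read by get_layer_config_value() above;
    # valid values, per the comments in the diff: 'combination', 'im2col', 'Winograd'.
    config['LayerName']['conv1']['Implementation'] = 'Winograd'

    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, backend='Quartus', output_dir='hls_prj'
    )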

hls4ml/backends/vivado/vivado_backend.py (0 additions, 26 deletions)

@@ -19,8 +19,6 @@
     Embedding,
     GarNet,
     GarNetStack,
-    GlobalPooling1D,
-    GlobalPooling2D,
     Layer,
     Pooling1D,
     Pooling2D,

@@ -32,7 +30,6 @@
 from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
 from hls4ml.report import parse_vivado_report
-from hls4ml.utils.fixed_point_utils import ceil_log2


 class VivadoBackend(FPGABackend):

@@ -433,37 +430,14 @@ def init_depconv2d(self, layer):

         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

-    def _set_pooling_accum_t(self, layer, pool_size):
-        extra_bits = ceil_log2(pool_size)
-        accum_t = layer.get_attr('accum_t')
-        accum_t.precision.width += extra_bits * 2
-        if isinstance(accum_t.precision, FixedPrecisionType):
-            accum_t.precision.integer += extra_bits
-
     @layer_optimizer(Pooling1D)
     def init_pooling1d(self, layer):
-        pool_size = layer.get_attr('pool_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

     @layer_optimizer(Pooling2D)
     def init_pooling2d(self, layer):
-        pool_size = layer.get_attr('pool_height') * layer.get_attr('pool_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

-    @layer_optimizer(GlobalPooling1D)
-    def init_global_pooling1d(self, layer):
-        pool_size = layer.get_attr('n_in')
-        self._set_pooling_accum_t(layer, pool_size)
-
-    @layer_optimizer(GlobalPooling2D)
-    def init_global_pooling2d(self, layer):
-        pool_size = layer.get_attr('in_height') * layer.get_attr('in_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
     @layer_optimizer(Softmax)
     def init_softmax(self, layer):
         if layer.model.config.get_config_value('IOType') == 'io_parallel':
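
Note: this removes the automatic widening of the pooling accumulator. The old helper grew accum_t by ceil_log2(pool_size) integer bits (and twice that in total width), so pooling layers now run with whatever accum precision the config specifies. A minimal sketch of restoring that headroom by hand, reusing the name-granularity config dict from the previous sketch and assuming a hypothetical 'avg_pool' layer with a 4x4 window whose Precision entry exposes an 'accum' key:

    import math

    # Hypothetical 4x4 pooling window; recompute what the removed helper derived.
    pool_size = 4 * 4
    extra_bits = math.ceil(math.log2(pool_size))  # the ceiling of log2(pool_size)

    # Widen a 16-bit, 6-integer-bit baseline the way _set_pooling_accum_t() did:
    # total width grows by 2*extra_bits, the integer part by extra_bits.
    width = 16 + 2 * extra_bits   # 24
    integer = 6 + extra_bits      # 10
    config['LayerName']['avg_pool']['Precision']['accum'] = f'ap_fixed<{width},{integer}>'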

hls4ml/converters/keras/qkeras.py (8 additions, 24 deletions)

@@ -9,7 +9,9 @@


 def get_quantizer_from_config(keras_layer, quantizer_var):
-    quantizer_config = keras_layer['config'][f'{quantizer_var}_quantizer']
+    quantizer_config = keras_layer['config'].get(f'{quantizer_var}_quantizer', None)
+    if quantizer_config is None:
+        return None  # No quantizer specified in the layer
     if keras_layer['class_name'] == 'QBatchNormalization':
         return QKerasQuantizer(quantizer_config)
     elif 'binary' in quantizer_config['class_name']:

@@ -25,10 +27,7 @@ def parse_qdense_layer(keras_layer, input_names, input_shapes, data_reader):
     layer, output_shape = parse_dense_layer(keras_layer, input_names, input_shapes, data_reader)

     layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')

     return layer, output_shape

@@ -43,10 +42,7 @@ def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader):
     layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)

     layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')

     return layer, output_shape

@@ -56,11 +52,7 @@ def parse_qdepthwiseqconv_layer(keras_layer, input_names, input_shapes, data_reader):
     layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)

     layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
-
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')

     return layer, output_shape

@@ -76,11 +68,7 @@ def parse_qsepconv_layer(keras_layer, input_names, input_shapes, data_reader):

     layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
     layer['pointwise_quantizer'] = get_quantizer_from_config(keras_layer, 'pointwise')
-
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')

     return layer, output_shape

@@ -93,11 +81,7 @@ def parse_qrnn_layer(keras_layer, input_names, input_shapes, data_reader):

     layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel')
     layer['recurrent_quantizer'] = get_quantizer_from_config(keras_layer, 'recurrent')
-
-    if keras_layer['config']['bias_quantizer'] is not None:
-        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
-    else:
-        layer['bias_quantizer'] = None
+    layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')

     return layer, output_shape
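
Note: the None handling now lives inside get_quantizer_from_config() itself. dict.get() tolerates a missing '*_quantizer' key, while the old direct indexing raised KeyError whenever a QKeras layer was serialized without one; the explicit is-not-None branches at every call site become redundant. A standalone sketch of the difference, using a hypothetical layer dict rather than real QKeras output:

    # Hypothetical serialized layer with no 'bias_quantizer' entry at all.
    keras_layer = {
        'class_name': 'QDense',
        'config': {'kernel_quantizer': {'class_name': 'quantized_bits'}},
    }

    try:
        keras_layer['config']['bias_quantizer']  # old pattern: KeyError on a missing key
    except KeyError:
        print('direct indexing fails when the key is absent')

    # New pattern: .get() returns None for both a missing key and an explicit null,
    # so every parse_q*_layer() can assign the result unconditionally.
    print(keras_layer['config'].get('bias_quantizer', None))  # -> None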

hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_resource.h (1 addition, 0 deletions)

@@ -3,6 +3,7 @@

 #include "nnet_common.h"
 #include "nnet_dense.h"
+#include <cstdint>

 namespace nnet {

hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_resource.h (1 addition, 0 deletions)

@@ -4,6 +4,7 @@
 #include "nnet_common.h"
 #include "nnet_dense.h"
 #include "nnet_helpers.h"
+#include <cstdint>

 namespace nnet {
