Treat PReLU in infer_precision #1329

Open · wants to merge 4 commits into main
hls4ml/model/optimizer/passes/infer_precision.py (19 additions, 2 deletions)
@@ -87,6 +87,8 @@ def _infer_precision(self, node, types_to_infer):
         if node_class in ['ParametrizedActivation']:
             return self._infer_par_act_precision(node, types_to_infer)
 
+        if node_class in ['PReLU']:
+            return self._infer_prelu_act_precision(node, types_to_infer)
         # What about quantized activation layer? Setting it to 'auto' manually will break it here. We should prevent
         # this in config_from_* functions
@@ -567,9 +569,24 @@ def _infer_par_act_precision(self, node, types_to_infer):
         # For threshold relu, set the parameter precision to be the input precision by default;
         # for other parametrized activations, just allow the default precision to be used.
         # Can override these values in the configuration by explicitly setting them.
-        if 'param_t' in inferred_types and self.get_attr('activation').lower() == 'thresholdedrelu':
+        if 'param_t' in types_to_infer and node.get_attr('activation').lower() == 'thresholdedrelu':
             in_type = node.get_input_variable().type.precision
-            node.attributes['param_t'].type = in_type
+            node.attributes['param_t'].precision = in_type
             inferred_types.append('param_t')
 
         return inferred_types
+
+    def _infer_prelu_act_precision(self, node, types_to_infer):
+        inferred_types = []
+
+        # For PReLU, set the parameter precision to be the input precision by default;
+        # as the parameters are stored as a weight tensor, that precision needs to be updated as well.
+        if 'param_t' in types_to_infer and node.get_attr('activation').lower() == 'prelu':
+
+            in_type = node.get_input_variable().type.precision
+            node.attributes['param_t'].precision = in_type
+            node.weights['param'].update_precision(node.types['param_t'].precision)
+
+            inferred_types.append('param_t')
+
+        return inferred_types
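
A minimal usage sketch (not part of the PR) of how the new pass would typically be exercised: a Keras model containing a PReLU layer is converted with a name-granularity config, and precisions left as 'auto' are resolved by infer_precision. The layer name, output directory, and the assumption that recent hls4ml versions leave name-granularity precisions as 'auto' by default are illustrative.

import numpy as np
from tensorflow.keras.layers import Input, PReLU
from tensorflow.keras.models import Model

import hls4ml

# Hypothetical example model: a single PReLU activation on an 8-wide input,
# mirroring the shape of the models built in test_activations.py.
inputs = Input(shape=(8,))
outputs = PReLU(name='prelu_act')(inputs)
model = Model(inputs, outputs)

# With 'name' granularity each layer gets its own precision entry; entries left
# as 'auto' are filled in by the infer_precision optimizer pass patched here.
config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend='Vivado')
hls_model = hls4ml.converters.convert_from_keras_model(
    model, hls_config=config, output_dir='hls_prelu_sketch', backend='Vivado'
)
hls_model.compile()

# After inference the PReLU alpha weights carry the input precision, so the
# fixed-point output should track the Keras output closely.
X = np.random.rand(100, 8).astype(np.float32) - 0.5
print(np.max(np.abs(hls_model.predict(X) - model.predict(X))))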
test/pytest/test_activations.py (6 additions, 2 deletions)
@@ -2,7 +2,8 @@
 
 import numpy as np
 import pytest
-from tensorflow.keras.layers import ELU, Activation, Input, LeakyReLU, ReLU, ThresholdedReLU
+import tensorflow as tf
+from tensorflow.keras.layers import ELU, Activation, Input, LeakyReLU, PReLU, ReLU, ThresholdedReLU
 from tensorflow.keras.models import Model
 
 import hls4ml
@@ -24,7 +25,7 @@
         (ELU(alpha=1.25), 'elu'),
         (Activation('selu'), 'selu'),
         # Tensorflow exception of multi-dimensional PReLU (8, 8, 3)
-        # (PReLU(alpha_initializer='zeros'), 'prelu'),
+        (PReLU(alpha_initializer=tf.initializers.constant(0.25)), 'prelu'),
         (Activation('softplus'), 'softplus'),
         (Activation('softsign'), 'softsign'),
         (Activation(activation='tanh'), 'tanh'),
@@ -35,6 +36,9 @@
     ],
 )
 def test_activations(backend, activation, name, shape, io_type):
+
+    if name == "prelu" and shape == (8, 8, 3):
+        return
     # Subtract 0.5 to include negative values
     X = np.random.rand(1000, *shape) - 0.5