diff --git a/hls4ml/model/optimizer/passes/infer_precision.py b/hls4ml/model/optimizer/passes/infer_precision.py
index 919bc0c3c..98779a7e8 100644
--- a/hls4ml/model/optimizer/passes/infer_precision.py
+++ b/hls4ml/model/optimizer/passes/infer_precision.py
@@ -87,6 +87,8 @@ def _infer_precision(self, node, types_to_infer):
         if node_class in ['ParametrizedActivation']:
             return self._infer_par_act_precision(node, types_to_infer)
 
+        if node_class in ['PReLU']:
+            return self._infer_prelu_act_precision(node, types_to_infer)
         # What about quantized activation layer? Setting it to 'auto' manually will break it here. We should prevent
         # this in config_from_* functions
 
@@ -567,9 +569,24 @@ def _infer_par_act_precision(self, node, types_to_infer):
         # For threshold relu, set the parameter precision to be the input precision by default;
         # for other parametrized activations, just allow the default precision to be used.
         # Can override these values in the configuration by explicitly setting them.
-        if 'param_t' in inferred_types and self.get_attr('activation').lower() == 'thresholdedrelu':
+        if 'param_t' in types_to_infer and node.get_attr('activation').lower() == 'thresholdedrelu':
             in_type = node.get_input_variable().type.precision
-            node.attributes['param_t'].type = in_type
+            node.attributes['param_t'].precision = in_type
+            inferred_types.append('param_t')
+
+        return inferred_types
+
+    def _infer_prelu_act_precision(self, node, types_to_infer):
+        inferred_types = []
+
+        # For PReLU, set the parameter precision to be the input precision by default;
+        # As the parameters are stored as a weight tensor, need to update that precision as well.
+        if 'param_t' in types_to_infer and node.get_attr('activation').lower() == 'prelu':
+
+            in_type = node.get_input_variable().type.precision
+            node.attributes['param_t'].precision = in_type
+            node.weights['param'].update_precision(node.types['param_t'].precision)
+            inferred_types.append('param_t')
 
         return inferred_types
 
diff --git a/test/pytest/test_activations.py b/test/pytest/test_activations.py
index d1ccba512..5a605070a 100644
--- a/test/pytest/test_activations.py
+++ b/test/pytest/test_activations.py
@@ -2,7 +2,8 @@
 
 import numpy as np
 import pytest
-from tensorflow.keras.layers import ELU, Activation, Input, LeakyReLU, ReLU, ThresholdedReLU
+import tensorflow as tf
+from tensorflow.keras.layers import ELU, Activation, Input, LeakyReLU, PReLU, ReLU, ThresholdedReLU
 from tensorflow.keras.models import Model
 
 import hls4ml
@@ -24,7 +25,7 @@
         (ELU(alpha=1.25), 'elu'),
         (Activation('selu'), 'selu'),
         # Tensorflow exception of multi-dimensional PReLU (8, 8, 3)
-        # (PReLU(alpha_initializer='zeros'), 'prelu'),
+        (PReLU(alpha_initializer=tf.initializers.constant(0.25)), 'prelu'),
         (Activation('softplus'), 'softplus'),
         (Activation('softsign'), 'softsign'),
         (Activation(activation='tanh'), 'tanh'),
@@ -35,6 +36,9 @@
     ],
 )
 def test_activations(backend, activation, name, shape, io_type):
+
+    if name == "prelu" and shape == (8, 8, 3):
+        return
     # Subtract 0.5 to include negative values
     X = np.random.rand(1000, *shape) - 0.5
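
Not part of the patch: the sketch below shows one way the new PReLU handling can be exercised end to end, mirroring the conversion flow already used in `test_activations.py`. The backend, tensor shape, output directory, alpha value and tolerances are illustrative assumptions, and the new `_infer_prelu_act_precision` pass only fires if the layer's `param_t` is left as `'auto'` in the generated config.

```python
# Minimal sketch (assumptions noted above), not a test added by this PR.
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, PReLU
from tensorflow.keras.models import Model

import hls4ml

# Small 1D PReLU model with a non-trivial alpha, as in the updated parametrization
inp = Input(shape=(8,))
out = PReLU(alpha_initializer=tf.initializers.constant(0.25))(inp)
model = Model(inputs=inp, outputs=out)

# Per-layer ('name') granularity, mirroring the existing test setup; types left as
# 'auto' are resolved by the precision-inference optimizer passes during conversion
config = hls4ml.utils.config_from_keras_model(model, granularity='name', backend='Vivado')
hls_model = hls4ml.converters.convert_from_keras_model(
    model,
    hls_config=config,
    io_type='io_parallel',
    output_dir='hls4mlprj_prelu_sketch',  # assumed scratch directory
    backend='Vivado',
)
hls_model.compile()

# Negative inputs make the learned slope actually matter
X = np.random.rand(1000, 8) - 0.5
np.testing.assert_allclose(hls_model.predict(X), model.predict(X), rtol=0.0, atol=0.02)
```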