Skip to content

Commit a278674

Browse files
committed
style
1 parent 503e33a commit a278674

File tree

5 files changed

+29
-29
lines changed

5 files changed

+29
-29
lines changed

hls4ml/converters/keras_v3/hgq2/_base.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717

1818
def extract_fixed_quantizer_config(q, tensor: 'KerasTensor', is_input: bool) -> dict[str, Any]:
1919
from hgq.quantizer.internal.fixed_point_quantizer import FixedPointQuantizerKBI, FixedPointQuantizerKIF
20-
from keras.ops import convert_to_numpy
20+
from keras import ops
2121

2222
internal_q: FixedPointQuantizerKIF | FixedPointQuantizerKBI = q.quantizer
2323

@@ -26,7 +26,7 @@ def extract_fixed_quantizer_config(q, tensor: 'KerasTensor', is_input: bool) ->
2626
raise ValueError(f"Tensor {tensor.name} has at least one dimension with no fixed size")
2727
k, i, f = internal_q.kif
2828
k, B, I = k, k + i + f, k + i # type: ignore # noqa: E741
29-
k, B, I = convert_to_numpy(k), convert_to_numpy(B), convert_to_numpy(I) # noqa: E741
29+
k, B, I = ops.convert_to_numpy(k), ops.convert_to_numpy(B), ops.convert_to_numpy(I) # noqa: E741
3030
I = np.where(B > 0, I, 0) # noqa: E741 # type: ignore
3131

3232
k = np.broadcast_to(k.astype(np.int8), (1,) + shape) # type: ignore
@@ -61,7 +61,7 @@ def override_io_tensor_confs(confs: tuple[dict[str, Any], ...], overrides: dict[
6161

6262

6363
@register
64-
class SQLayerHandler(KerasV3LayerHandler):
64+
class QLayerHandler(KerasV3LayerHandler):
6565
def __call__(
6666
self,
6767
layer: 'hgq.layers.QLayerBase',
@@ -101,23 +101,23 @@ def __call__(
101101
return *iq_confs, *ret, *oq_confs
102102

103103
def load_weight(self, layer: 'Layer', key: str):
104-
from keras.ops import convert_to_numpy
104+
from keras import ops
105105

106106
if hasattr(layer, f'q{key}'):
107-
return convert_to_numpy(getattr(layer, f'q{key}'))
107+
return ops.convert_to_numpy(getattr(layer, f'q{key}'))
108108
return super().load_weight(layer, key)
109109

110110

111111
@register
112-
class SQEinsumDenseHandler(SQLayerHandler, EinsumDenseHandler):
112+
class QEinsumDenseHandler(QLayerHandler, EinsumDenseHandler):
113113
handles = (
114114
'hgq.layers.core.einsum_dense.QEinsumDense',
115115
'hgq.layers.einsum_dense_batchnorm.QEinsumDenseBatchnorm',
116116
)
117117

118118

119119
@register
120-
class SQStandaloneQuantizerHandler(KerasV3LayerHandler):
120+
class QStandaloneQuantizerHandler(KerasV3LayerHandler):
121121
handles = ('hgq.quantizer.quantizer.Quantizer',)
122122

123123
def handle(
@@ -132,7 +132,7 @@ def handle(
132132

133133

134134
@register
135-
class SQConvHandler(SQLayerHandler, ConvHandler):
135+
class QConvHandler(QLayerHandler, ConvHandler):
136136
handles = (
137137
'hgq.layers.conv.QConv1D',
138138
'hgq.layers.conv.QConv2D',
@@ -158,7 +158,7 @@ def handle(
158158

159159

160160
@register
161-
class SQDenseHandler(SQLayerHandler, DenseHandler):
161+
class QDenseHandler(QLayerHandler, DenseHandler):
162162
handles = ('hgq.layers.core.dense.QDense', 'hgq.layers.core.dense.QBatchNormDense')
163163

164164
def handle(
@@ -177,12 +177,12 @@ def handle(
177177

178178

179179
@register
180-
class SQActivationHandler(SQLayerHandler, ActivationHandler):
180+
class QActivationHandler(QLayerHandler, ActivationHandler):
181181
handles = ('hgq.layers.activation.QActivation',)
182182

183183

184184
@register
185-
class SQBatchNormalizationHandler(SQLayerHandler):
185+
class QBatchNormalizationHandler(QLayerHandler):
186186
handles = ('hgq.layers.batch_normalization.QBatchNormalization',)
187187

188188
def handle(
@@ -208,7 +208,7 @@ def handle(
208208

209209

210210
@register
211-
class SQMergeHandler(SQLayerHandler, MergeHandler):
211+
class QMergeHandler(QLayerHandler, MergeHandler):
212212
handles = (
213213
'hgq.layers.ops.merge.QAdd',
214214
'hgq.layers.ops.merge.QSubtract',

hls4ml/converters/keras_v3/hgq2/einsum.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,16 @@
11
import typing
2-
from typing import Sequence
2+
from collections.abc import Sequence
33

44
from ..einsum_dense import strip_batch_dim
5-
from ._base import SQLayerHandler, register
5+
from ._base import QLayerHandler, register
66

77
if typing.TYPE_CHECKING:
88
import hgq
99
from keras import KerasTensor
1010

1111

1212
@register
13-
class SQEinsumHandler(SQLayerHandler):
13+
class QEinsumHandler(QLayerHandler):
1414
handles = ('hgq.layers.ops.einsum.QEinsum',)
1515

1616
def handle(

hls4ml/converters/keras_v3/hgq2/multi_head_attention.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,20 @@
11
import typing
2+
from collections.abc import Sequence
23
from inspect import Signature
3-
from typing import Sequence
44

55
import numpy as np
66

7-
from ._base import SQEinsumDenseHandler, SQLayerHandler, register
8-
from .einsum import SQEinsumHandler
9-
from .softmax import SQSoftmaxHandler
7+
from ._base import QEinsumDenseHandler, QLayerHandler, register
8+
from .einsum import QEinsumHandler
9+
from .softmax import QSoftmaxHandler
1010

1111
if typing.TYPE_CHECKING:
1212
import hgq
1313
from keras import KerasTensor
1414

1515

1616
@register
17-
class SQMultiHeadAttentionHandler(SQLayerHandler):
17+
class QMultiHeadAttentionHandler(QLayerHandler):
1818
handles = ('hgq.layers.multi_head_attention.QMultiHeadAttention',)
1919

2020
def handle(
@@ -99,9 +99,9 @@ def handle(
9999
tensor_pre_score = KerasTensor(name=f'{unique_name}_pre_score', shape=score_batch_shape)
100100
tensor_score = KerasTensor(name=f'{unique_name}_score', shape=score_batch_shape)
101101

102-
einsum_handler = SQEinsumHandler()
103-
einsum_dense_handler = SQEinsumDenseHandler()
104-
softmax_handler = SQSoftmaxHandler()
102+
einsum_handler = QEinsumHandler()
103+
einsum_dense_handler = QEinsumDenseHandler()
104+
softmax_handler = QSoftmaxHandler()
105105

106106
config_to_Q = einsum_dense_handler(to_Q, [tensor_q], [tensor_Q])
107107
config_to_K = einsum_dense_handler(to_K, [tensor_k], [tensor_K])

hls4ml/converters/keras_v3/hgq2/softmax.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
import typing
2+
from collections.abc import Sequence
23
from math import prod
3-
from typing import Sequence
44

55
from hls4ml.model.types import FixedPrecisionType, RoundingMode, SaturationMode
66

7-
from ._base import SQLayerHandler, register
7+
from ._base import QLayerHandler, register
88

99
if typing.TYPE_CHECKING:
1010
import hgq
@@ -40,7 +40,7 @@ def fixed_quantizer_to_hls4ml_t(q: 'FixedPointQuantizerBase', take_max=False):
4040

4141

4242
@register
43-
class SQSoftmaxHandler(SQLayerHandler):
43+
class QSoftmaxHandler(QLayerHandler):
4444
handles = ('hgq.layers.softmax.QSoftmax',)
4545

4646
def handle(

hls4ml/converters/keras_v3/hgq2/unary_lut.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
11
import typing
2-
from typing import Sequence
2+
from collections.abc import Sequence
33

44
import numpy as np
55
from quantizers import float_quantize, get_fixed_quantizer_np
66

77
from hls4ml.model.types import FixedPrecisionType
88

9-
from ._base import KerasV3LayerHandler, SQLayerHandler, register
9+
from ._base import KerasV3LayerHandler, QLayerHandler, register
1010

1111
if typing.TYPE_CHECKING:
1212
import hgq
@@ -18,7 +18,7 @@
1818

1919

2020
@register
21-
class SQUnaryLUTHandler(SQLayerHandler, KerasV3LayerHandler):
21+
class QUnaryLUTHandler(QLayerHandler, KerasV3LayerHandler):
2222
handles = ('hgq.layers.activation.QUnaryFunctionLUT',)
2323

2424
def handle(

0 commit comments

Comments (0)