 class TrainableEquivalentTransformation:
-    """Weight-only quantization, Trainable Equivalent Transformation (TEQ): linear wrapper to apply scale to input."""
+    """Weight-only quantization, Trainable Equivalent Transformation (TEQ)."""
+
+    _PREPARE_ATTRS: list[str] = ["weight_config", "trained_alphas"]
+    _PREPARE_ATTRS_PREFIX = "_prepare_"
 
     def __init__(self, model, weight_config={}, absorb_to_layer={}, folding=True, example_inputs=None):
         """
@@ -47,7 +50,6 @@ def __init__(self, model, weight_config={}, absorb_to_layer={}, folding=True, ex
         self.device = self._get_device()
         self.trained_alphas = {}
         self.absorb_to_layer = absorb_to_layer
-        self._prepared_attrs = ["weight_config", "trained_alphas"]
         self._post_initialized = False
 
     def _post_init(self):
@@ -353,13 +355,13 @@ def prepare(self, model, *args, **kwargs):
         self._quantizer.model = float_model
         logger.info("TEQ quantizing start.")
         self._quantizer.add_tuning_scale()
-        for attr in self._quantizer._prepared_attrs:
-            setattr(float_model, "_" + attr, getattr(self._quantizer, attr))
+        for attr in self._quantizer._PREPARE_ATTRS:
+            setattr(float_model, self._quantizer._PREPARE_ATTRS_PREFIX + attr, getattr(self._quantizer, attr))
         return float_model
 
     def convert(self, model, *args: Any, **kwargs: Any):
-        for attr in self._quantizer._prepared_attrs:
-            setattr(self._quantizer, attr, getattr(model, "_" + attr, None))
+        for attr in self._quantizer._PREPARE_ATTRS:
+            setattr(self._quantizer, attr, getattr(model, self._quantizer._PREPARE_ATTRS_PREFIX + attr, None))
         self._quantizer.model = model
         self._quantizer.transform()
         self._quantizer.quantize()
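
For reference, a minimal sketch of the state hand-off this patch introduces: prepare() stashes the quantizer's tuned attributes on the returned model under "_prepare_"-prefixed names, and convert() reads them back into the quantizer before transform()/quantize(). The Toy* classes and the example config values below are hypothetical stand-ins; only _PREPARE_ATTRS and _PREPARE_ATTRS_PREFIX come from the patch.

# Hypothetical stand-in for the TEQ quantizer; real attribute values are tensors/configs.
class ToyQuantizer:
    _PREPARE_ATTRS: list[str] = ["weight_config", "trained_alphas"]
    _PREPARE_ATTRS_PREFIX = "_prepare_"

    def __init__(self):
        self.weight_config = {"decoder.layers.0.fc1": {"bits": 4}}  # hypothetical config
        self.trained_alphas = {"decoder.layers.0": 1.05}            # hypothetical scales


class ToyModel:
    pass


quantizer = ToyQuantizer()
model = ToyModel()

# prepare(): stash tuned state on the model under prefixed names so it travels
# with the model between the prepare and convert API calls.
for attr in quantizer._PREPARE_ATTRS:
    setattr(model, quantizer._PREPARE_ATTRS_PREFIX + attr, getattr(quantizer, attr))

# convert(): restore the stashed state onto a (possibly fresh) quantizer instance.
restored = ToyQuantizer.__new__(ToyQuantizer)
for attr in ToyQuantizer._PREPARE_ATTRS:
    setattr(restored, attr, getattr(model, ToyQuantizer._PREPARE_ATTRS_PREFIX + attr, None))

assert restored.trained_alphas == quantizer.trained_alphas

Using class-level constants (rather than the old per-instance _prepared_attrs list) and a distinctive "_prepare_" prefix keeps the stashed attributes from colliding with existing single-underscore attributes on the model.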