Skip to content

Commit 48fbbd8

Browse files
committed
rename to map_module_to_scheme
Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>
1 parent c1b06de commit 48fbbd8

File tree

1 file changed

+3
-7
lines changed

1 file changed

+3
-7
lines changed

src/compressed_tensors/compressors/model_compressors/model_compressor.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@
     load_pretrained_quantization,
 )
 from compressed_tensors.quantization.lifecycle import expand_target_names
-from compressed_tensors.quantization.quant_args import QuantizationArgs
 from compressed_tensors.quantization.utils import (
     is_module_quantized,
     iter_named_leaf_modules,
@@ -62,7 +61,7 @@
 from transformers.file_utils import CONFIG_NAME


-__all__ = ["ModelCompressor", "map_modules_to_quant_scheme"]
+__all__ = ["ModelCompressor", "map_module_to_scheme"]


 _LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -373,11 +372,8 @@ def compress(
         if state_dict is None:
             state_dict = model.state_dict()

-        module_to_scheme: Dict[str, QuantizationScheme] = map_modules_to_quant_scheme(
-            model
-        )
-
         if self.quantization_compressor is not None:
+            module_to_scheme = map_module_to_scheme(model)
             state_dict = self.quantization_compressor.compress(
                 state_dict, names_to_scheme=module_to_scheme
             )
@@ -521,7 +517,7 @@ def _replace_weights(self, dense_weight_generator, model: Module):
             update_parameter_data(module, data, param_name)


-def map_modules_to_quant_scheme(
+def map_module_to_scheme(
     model: Module,
 ) -> Dict[str, QuantizationScheme]:
     """

0 commit comments

Comments (0)