def map_module_to_scheme(model: Module) -> Dict[str, QuantizationScheme]:
    """
    Map the names of quantized modules in ``model`` to their quantization schemes.

    Only modules whose ``quantization_scheme`` has a ``weights`` config are
    included — activation-only quantized modules are skipped, since only
    weight quantization produces tensors that can be compressed.

    :param model: model whose submodules are inspected for quantization schemes
    :return: dict mapping FSDP-normalized module names to their schemes
    """
    return {
        fix_fsdp_module_name(name): module.quantization_scheme
        for name, module in model.named_modules()
        # getattr guards against both a missing attribute and an explicit
        # None scheme; a bare hasattr check would raise AttributeError on
        # `.weights` when quantization_scheme is set to None
        if (
            getattr(module, "quantization_scheme", None) is not None
            and module.quantization_scheme.weights is not None
        )
    }