Skip to content

Commit 700d4b6

Browse files
committed
fix test
Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>
1 parent dfef94d commit 700d4b6

File tree

2 files changed

+2
-4
lines changed

2 files changed

+2
-4
lines changed

src/compressed_tensors/compressors/model_compressors/model_compressor.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -382,8 +382,6 @@ def apply_compression_status(self, model: Module) -> Module:
382382
def replace_with_compressed(module: Module) -> Module:
383383
scheme = getattr(module, "quantization_scheme", None)
384384
if isinstance(module, torch.nn.Linear) and scheme is not None:
385-
# compressed_state_dict_2 = self.compress(module) # debug
386-
387385
module = CompressedLinear.from_linear(
388386
module,
389387
quantization_scheme=scheme,

tests/test_compressors/quantized_compressors/test_pack_quant.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -228,9 +228,9 @@ def test_asymmetric_packed_support(strategy):
228228
)
229229

230230
compressor = PackedQuantizationCompressor(config=quant_config)
231-
quantized_modules_to_args = {"dummy": quant_config.config_groups["group_1"].weights}
231+
quantized_modules_to_scheme = {"dummy": quant_config.config_groups["group_1"]}
232232
compressed_state_dict = compressor.compress(
233-
dense_state_dict, names_to_scheme=quantized_modules_to_args
233+
dense_state_dict, names_to_scheme=quantized_modules_to_scheme
234234
)
235235

236236
# compressed state_dict adds one entry for shape

0 commit comments

Comments (0)