Skip to content

Commit 53b63b1

Browse files
committed
Added docstring
Signed-off-by: shanjiaz <zsjwpianpian@gmail.com>
1 parent 4a5f064 commit 53b63b1

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

src/compressed_tensors/compressors/model_compressors/model_compressor.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -384,6 +384,8 @@ def compress_model(self, model: Module, is_meta: bool = False):
384384
this method is more memory-efficient than `self.compress`
385385
386386
:param model: model containing parameters to compress
387+
:param is_meta: whether the model is on the meta device, in which case
388+
we do not need to move parameters to CPU
387389
"""
388390
module_to_scheme = map_module_to_scheme(model)
389391
sparse_compression_targets: Set[str] = expand_target_names(
@@ -488,9 +490,7 @@ def decompress_model(self, model: Module):
488490
# replace with decompressed parameters
489491
for name, value in state_dict.items():
490492
name = name.removeprefix(f"{prefix}.")
491-
# skipping save if we're just registering the model on meta device
492-
if exec_device != "meta":
493-
value = value.to(exec_device)
493+
value = value.to(exec_device)
494494
param = torch.nn.Parameter(value, requires_grad=False)
495495
register_offload_parameter(module, name, param, offload_device)
496496

0 commit comments

Comments
 (0)