Commit d955b5e (parent 6d8f9eb)

remove unused utils

Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>

2 files changed: 1 addition & 53 deletions

src/compressed_tensors/compressors/model_compressors/model_compressor.py

Lines changed: 0 additions & 4 deletions
@@ -33,15 +33,13 @@
 from compressed_tensors.compressors.base import BaseCompressor
 from compressed_tensors.compressors.sparse_compressors import DenseCompressor
 from compressed_tensors.config import CompressionFormat, SparsityCompressionConfig
-from compressed_tensors.linear.compressed_linear import CompressedLinear
 from compressed_tensors.quantization import (
     DEFAULT_QUANTIZATION_METHOD,
     QuantizationConfig,
     QuantizationScheme,
     QuantizationStatus,
     apply_quantization_config,
     load_pretrained_quantization_parameters,
-    unwrap_module_forward_quantized,
 )
 from compressed_tensors.quantization.lifecycle import expand_target_names
 from compressed_tensors.quantization.utils import (
@@ -52,15 +50,13 @@
     get_safetensors_folder,
     has_offloaded_params,
     merge_names,
-    module_map_replace,
     register_offload_parameter,
     update_parameter_data,
 )
 from compressed_tensors.utils.helpers import (
     fix_fsdp_module_name,
     is_compressed_tensors_config,
 )
-from compressed_tensors.utils.offload import disable_hf_hook, update_offload_parameter
 from torch import Tensor
 from torch.nn import Module
 from tqdm import tqdm

src/compressed_tensors/utils/helpers.py

Lines changed: 1 addition & 49 deletions
@@ -14,11 +14,10 @@

 import warnings
 from functools import wraps
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional

 import numpy
 import torch
-import tqdm
 from transformers import AutoConfig


@@ -39,8 +38,6 @@
     "shard_tensor",
     "pack_bitmasks",
     "unpack_bitmasks",
-    "remove_suffix",
-    "module_map_replace",
 ]

 FSDP_WRAPPER_NAME = "_fsdp_wrapped_module"
@@ -331,48 +328,3 @@ def unpack_bitmasks(
     )

     return unpacked_bitmasks_torch
-
-
-def remove_suffix(value: str, suffix: str) -> str:
-    # can replace with str.removesuffix in python3.9+
-    assert value.endswith(suffix)
-    return value[: -len(suffix)]
-
-
-def module_map_replace(
-    module: torch.nn.Module,
-    func: Callable[[torch.nn.Module], torch.nn.Module],
-    progress: Union[bool, tqdm.tqdm] = False,
-    pre: bool = True,
-) -> torch.nn.Module:
-    """
-    Replaces modules in a given `torch.nn.Module` recursively using a provided function.
-
-    This function traverses the module hierarchy and applies the `func` transformation
-    either before (`pre=True`) or after (`pre=False`) recursing into children modules.
-    Optionally displays progress using tqdm.
-
-    :param module: root module to replace
-    :param func: module mapping function
-    :param progress: if True, display a tqdm progress bar.
-        If a `tqdm.tqdm` instance is provided, the instance will be updated
-    :param pre: if True, apply with pre-order, post-order otherwise
-    :return: the modified module after applying the function to all submodules
-    """
-    if progress is True:
-        total = len(list(module.modules()))
-        progress = tqdm.tqdm(total=total)
-
-    if pre:
-        module = func(module)
-
-    for name, child in list(module.named_children()):
-        module.add_module(name, module_map_replace(child, func, pre, progress))
-
-    if not pre:
-        module = func(module)
-
-    if isinstance(progress, tqdm.tqdm):
-        progress.update(1)
-
-    return module
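
The comment on the deleted remove_suffix points at its standard-library successor. A minimal sketch, assuming Python 3.9+ (the example values are illustrative):

    # str.removesuffix (Python 3.9+) replaces the deleted helper; unlike the
    # removed version it does not assert, and simply returns the string
    # unchanged when the suffix is absent.
    name = "model.safetensors"
    assert name.removesuffix(".safetensors") == "model"
    assert name.removesuffix(".bin") == name  # no-op instead of AssertionError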

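For reference, a minimal sketch (not part of this commit) of the pre-order replacement pattern the removed module_map_replace implemented; map_modules and linear_to_identity are hypothetical names. Note that the removed helper's recursive call passed pre and progress positionally in swapped order relative to its signature, so its progress reporting would not have matched the docstring.

    import torch

    def map_modules(module: torch.nn.Module, func) -> torch.nn.Module:
        # Pre-order: transform the parent first, then recurse into the
        # transformed parent's children (the removed helper's pre=True path).
        module = func(module)
        for name, child in list(module.named_children()):
            # add_module with an existing name replaces that child in place
            module.add_module(name, map_modules(child, func))
        return module

    def linear_to_identity(m: torch.nn.Module) -> torch.nn.Module:
        # Example mapping function: strip out every Linear layer.
        return torch.nn.Identity() if isinstance(m, torch.nn.Linear) else m

    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    model = map_modules(model, linear_to_identity)
    print(model)  # (0): Identity(), (1): ReLU()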