Skip to content

Commit 38e36de

Browse files
facebook-github-bot and CodemodService Bot authored
Auto-fix lint violations from [Fixit] fbcode//pytorch/ao (#1752)
Auto-fix lint violations from [Fixit] fbcode//pytorch/ao (#1752) Summary: Pull Request resolved: #1752 Reviewed By: amyreese Differential Revision: D69041228 Co-authored-by: CodemodService Bot <ltoonqpgsf_1546886053@tfbnw.net>
1 parent 089cd7e commit 38e36de

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

torchao/quantization/GPTQ.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -759,15 +759,15 @@ def _create_quantized_state_dict(
759759
if self.padding_allowed:
760760
import torch.nn.functional as F
761761

762-
logging.warn(
762+
logging.warning(
763763
f"warning: {fqn} is padded to satisfy in_features % 1024 == 0"
764764
)
765765
padded_in_features = find_multiple(in_features, 1024)
766766
weight = F.pad(
767767
weight, pad=(0, padded_in_features - in_features)
768768
)
769769
else:
770-
logging.warn(
770+
logging.warning(
771771
f"warning: {fqn} is skipped, int4 requires that in_features is 32, 64, or is divisible by 1024, "
772772
+ "and that groupsize and inner_k_tiles*16 evenly divide into it"
773773
)
@@ -1147,15 +1147,15 @@ def _create_quantized_state_dict(
11471147
if self.padding_allowed:
11481148
import torch.nn.functional as F
11491149

1150-
logging.warn(
1150+
logging.warning(
11511151
f"warning: {fqn} is padded to satisfy in_features % 1024 == 0"
11521152
)
11531153
padded_in_features = find_multiple(in_features, 1024)
11541154
weight = F.pad(
11551155
weight, pad=(0, padded_in_features - in_features)
11561156
)
11571157
else:
1158-
logging.warn(
1158+
logging.warning(
11591159
f"warning: {fqn} is skipped, int4 requires that in_features is 32, 64, or is divisible by 1024, "
11601160
+ "and that groupsize and inner_k_tiles*16 evenly divide into it"
11611161
)

0 commit comments

Comments (0)