Skip to content

Commit f16f4c5

Browse files
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 8fd0a21 commit f16f4c5

File tree

2 files changed

+7
-7
lines changed

2 files changed

+7
-7
lines changed

neural_compressor/torch/quantization/algorithm_entry.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -401,7 +401,7 @@ def autoround_quantize_entry(
401401
if getattr(model, "quantizer", False):
402402
del model.quantizer
403403
else:
404-
model.quantizer = quantizer
404+
model.quantizer = quantizer
405405
model = quantizer.execute(model=model, mode=mode, *args, **kwargs)
406406
logger.info("AutoRound quantization done.")
407407
return model

test/3x/torch/quantization/weight_only/test_autoround.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,10 @@
55
from neural_compressor.torch.algorithms.weight_only.autoround import AutoRoundQuantizer, get_autoround_default_run_fn
66
from neural_compressor.torch.quantization import (
77
AutoRoundConfig,
8-
quantize,
9-
prepare,
108
convert,
11-
get_default_AutoRound_config
9+
get_default_AutoRound_config,
10+
prepare,
11+
quantize,
1212
)
1313
from neural_compressor.torch.utils import logger
1414

@@ -116,7 +116,7 @@ def test_quantizer(self, gpt_j_model):
116116
assert "transformer.h.0.attn.k_proj" in q_model.autoround_config.keys()
117117
assert "scale" in q_model.autoround_config["transformer.h.0.attn.k_proj"].keys()
118118
assert torch.float32 == q_model.autoround_config["transformer.h.0.attn.k_proj"]["scale_dtype"]
119-
119+
120120
def test_new_api(self, gpt_j_model):
121121
inp = torch.ones([1, 10], dtype=torch.long)
122122

@@ -127,7 +127,7 @@ def test_new_api(self, gpt_j_model):
127127
out1 = gpt_j_model(inp)
128128
quant_config = get_default_AutoRound_config()
129129
logger.info(f"Test AutoRound with config {quant_config}")
130-
130+
131131
run_fn = get_autoround_default_run_fn
132132
run_args = (
133133
tokenizer,
@@ -147,4 +147,4 @@ def test_new_api(self, gpt_j_model):
147147
assert torch.allclose(out1[0], out2[0], atol=1e-1)
148148
assert "transformer.h.0.attn.k_proj" in q_model.autoround_config.keys()
149149
assert "scale" in q_model.autoround_config["transformer.h.0.attn.k_proj"].keys()
150-
assert torch.float32 == q_model.autoround_config["transformer.h.0.attn.k_proj"]["scale_dtype"]
150+
assert torch.float32 == q_model.autoround_config["transformer.h.0.attn.k_proj"]["scale_dtype"]

0 commit comments

Comments (0)