
Commit 655fbd7

update import
1 parent 2d812af commit 655fbd7

1 file changed (+2, -2 lines)


test/quantization/test_qat.py

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,7 @@
 
 from torchao import quantize_
 from torchao.float8.config import ScalingGranularity
-from torchao.float8.float8_scaling_utils import hp_tensor_to_float8_dynamic
+from torchao.float8.float8_scaling_utils import _hp_tensor_to_float8_dynamic
 from torchao.float8.float8_tensor import LinearMMConfig
 from torchao.quantization.granularity import (
     PerAxis,
@@ -1703,7 +1703,7 @@ def test_float8_rowwise_fake_quantize(self):
         x = torch.randn(32, 64)
         axiswise_dim = 0
         out = _Float8RowwiseFakeQuantize.apply(x, dtype, axiswise_dim)
-        out_expected = hp_tensor_to_float8_dynamic(
+        out_expected = _hp_tensor_to_float8_dynamic(
             x,
             dtype,
             LinearMMConfig(),
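
For context, a minimal sketch of how downstream code could absorb this rename across torchao versions. The try/except fallback below is an illustrative assumption, not something this commit adds; the commit itself simply switches the test to the underscore-prefixed name.

# Minimal sketch (assumption): prefer the renamed private helper, and fall back
# to the older public name on torchao versions that predate the underscore prefix.
try:
    from torchao.float8.float8_scaling_utils import _hp_tensor_to_float8_dynamic
except ImportError:
    # Older torchao releases expose the same helper without the leading underscore.
    from torchao.float8.float8_scaling_utils import (
        hp_tensor_to_float8_dynamic as _hp_tensor_to_float8_dynamic,
    )

The leading underscore marks the helper as internal to torchao.float8, so code outside the repo is better served by pinning a torchao version than by relying on either name long-term.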
