commit e8c6c8f (1 parent: b11b96a)
src/compressed_tensors/quantization/utils/helpers.py
@@ -92,7 +92,7 @@ def calculate_qparams(
             scales = scales.to(FP8_E4M3_DATA.dtype)
         else:
             # Divide over bit range over max value?
-            scales = max_val_pos / (float(bit_radnge) / 2)
+            scales = max_val_pos / (float(bit_range) / 2)

         # TODO: clamp not implemented for FP8 '
         # scales = torch.clamp(scales, min=torch.finfo(torch.float32).eps)
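For context on what the corrected line computes: it derives a symmetric quantization scale by dividing the largest observed absolute value by half the quantizer's bit range. Below is a minimal standalone sketch of that arithmetic, assuming int8 and taking `bit_range` as `q_max - q_min` (255 for int8, matching the `float(bit_range) / 2` expression in the diff); the `symmetric_scale` helper is hypothetical and not compressed-tensors' API.

```python
import torch


def symmetric_scale(x: torch.Tensor, num_bits: int = 8) -> torch.Tensor:
    # Hypothetical sketch, not the library's API. Assumes
    # bit_range = q_max - q_min (255 for int8), so the positive half
    # of the symmetric grid spans bit_range / 2 quantization steps.
    bit_range = 2**num_bits - 1
    max_val_pos = x.abs().max()
    return max_val_pos / (float(bit_range) / 2)


x = torch.randn(16)
scale = symmetric_scale(x)
# Quantize onto the int8 grid using the computed scale.
q = torch.clamp(torch.round(x / scale), min=-128, max=127)
```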