Skip to content

Commit c3c3163

Browse files
Resolve logger warnings (#2250)
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
1 parent: 14965e4 · commit: c3c3163

File tree

2 files changed

+6
-6
lines changed

2 files changed

+6
-6
lines changed

torchao/dtypes/affine_quantized_tensor_ops.py

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -130,8 +130,8 @@ def deregister_aqt_quantized_linear_dispatch(dispatch_condition):
130130
if dispatch_condition in _AQT_QLINEAR_DISPATCH_TABLE:
131131
del _AQT_QLINEAR_DISPATCH_TABLE[dispatch_condition]
132132
else:
133-
logger.warn(
134-
f"Attempting to remove non-existant dispatch condition {dispatch_condition}"
133+
logger.warning(
134+
f"Attempting to remove non-existent dispatch condition {dispatch_condition}"
135135
)
136136

137137

@@ -273,7 +273,7 @@ def _(func, types, args, kwargs):
273273
try:
274274
return weight_tensor._quantized_linear_op(input_tensor, weight_tensor, bias)
275275
except QuantizedLinearNotImplementedError as e:
276-
# fallback path is only called when user did not specify a specfic quantized linear implementation with `_layout.quantized_linear_impl`
276+
# fallback path is only called when user did not specify a specific quantized linear implementation with `_layout.quantized_linear_impl`
277277
if (
278278
isinstance(weight_tensor, AffineQuantizedTensor)
279279
and hasattr(weight_tensor._layout, "quantized_linear_impl")
@@ -362,7 +362,7 @@ def _(func, types, args, kwargs):
362362
input_tensor, transposed_weight_tensor, bias
363363
)
364364
except QuantizedLinearNotImplementedError as e:
365-
# fallback path is only called when user did not specify a specfic quantized linear implementation with `_layout.quantized_linear_impl`
365+
# fallback path is only called when user did not specify a specific quantized linear implementation with `_layout.quantized_linear_impl`
366366
if (
367367
isinstance(weight_tensor, AffineQuantizedTensor)
368368
and hasattr(weight_tensor._layout, "quantized_linear_impl")
@@ -396,7 +396,7 @@ def _(func, types, args, kwargs):
396396
input_tensor, transposed_weight_tensor, bias
397397
)
398398
except QuantizedLinearNotImplementedError as e:
399-
# fallback path is only called when user did not specify a specfic quantized linear implementation with `_layout.quantized_linear_impl`
399+
# fallback path is only called when user did not specify a specific quantized linear implementation with `_layout.quantized_linear_impl`
400400
if (
401401
isinstance(weight_tensor, AffineQuantizedTensor)
402402
and hasattr(weight_tensor._layout, "quantized_linear_impl")

torchao/quantization/quant_api.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1797,7 +1797,7 @@ def _uintx_weight_only_transform(
17971797

17981798
if use_hqq:
17991799
if dtype == torch.uint4:
1800-
logger.warn(
1800+
logger.warning(
18011801
"Recommended to use `int4_weight_only(group_size, use_hqq=True)` for the best performance"
18021802
)
18031803
quant_min, quant_max = _DTYPE_TO_QVALUE_BOUNDS[dtype]

0 commit comments

Comments
 (0)