Skip to content

Commit aa54c5f

Browse files
committed
refine details
Signed-off-by: zehao-intel <zehao.huang@intel.com>
1 parent 264a484 commit aa54c5f

File tree

2 files changed

+2
-2
lines changed

2 files changed

+2
-2
lines changed

neural_compressor/torch/quantization/algorithm_entry.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -502,7 +502,7 @@ def fp8_quant_entry(
502502
return model
503503

504504

505-
###################### FP16 Algo Entry ##################################
505+
###################### Mixed Precision Algo Entry ##################################
506506
@register_algo(MIX_PRECISION)
507507
def mix_precision_entry(
508508
model: torch.nn.Module, configs_mapping: Dict[Tuple[str], MixPrecisionConfig], *args, **kwargs

neural_compressor/torch/quantization/config.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1173,7 +1173,7 @@ def register_supported_configs(cls) -> List[OperatorConfig]:
11731173
supported_configs = []
11741174
mix_precision_config = MixPrecisionConfig(
11751175
dtype=["fp16", "fp32"],
1176-
device=["cpu", "cuda"],
1176+
device=["auto", "cpu", "cuda"],
11771177
)
11781178
operators = cls.supported_fp16_ops
11791179
supported_configs.append(OperatorConfig(config=mix_precision_config, operators=operators))

0 commit comments

Comments (0)