1 file changed in neural_compressor/torch/quantization (+2 −2 lines)

@@ -37,7 +37,7 @@ def load(output_dir="./saved_results", model=None):
     qconfig_file_path = os.path.join(os.path.abspath(os.path.expanduser(output_dir)), "qconfig.json")
     with open(qconfig_file_path, "r") as f:
         per_op_qconfig = json.load(f)
-
+
     if " " in per_op_qconfig.keys():  # ipex qconfig format: {' ': {'q_op_infos': {'0': {'op_type': ...
         from neural_compressor.torch.algorithms.static_quant import load
 
@@ -50,7 +50,7 @@ def load(output_dir="./saved_results", model=None):
         from neural_compressor.torch.algorithms.weight_only.save_load import load
 
         return load(output_dir)
-
+
     model.qconfig = config_mapping
     if isinstance(config_object, FP8Config):
         from neural_compressor.torch.algorithms.habana_fp8 import load
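For context, a minimal usage sketch of the load() entry point touched by this diff, assuming the changed package (neural_compressor/torch/quantization) re-exports load, that a quantized model was previously saved to ./saved_results (the function's default output_dir), and that fp32_model is an illustrative placeholder rather than anything from the PR:

# Minimal usage sketch, not part of the PR. Assumes ./saved_results was
# produced by an earlier quantize-and-save step.
import torch
from neural_compressor.torch.quantization import load

# Illustrative placeholder for the original FP32 model that was quantized.
fp32_model = torch.nn.Linear(16, 4)

# load() reads <output_dir>/qconfig.json and dispatches to the matching
# algorithm loader: static_quant for IPEX-format configs, weight_only
# save_load for weight-only configs, or habana_fp8 when the config object
# is an FP8Config.
quantized_model = load(output_dir="./saved_results", model=fp32_model)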