
Commit d98c83a

fix typo and fp32 loading issue (#1294)
1 parent 88548dd

9 files changed: +19 -14 lines

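In short: the "typo" in the commit title is a %-formatting bug in the four run_qa.py / run_glue.py examples below, and the fp32 loading issue is in the four run_benchmark.sh scripts (plus the QAT README), which always pointed --model_name_or_path at the tuned int8 checkpoint, even for fp32 runs.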

examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/fx/run_qa.py

Lines changed: 2 additions & 2 deletions
@@ -616,9 +616,9 @@ def take_eval_steps(model, trainer, metric_name, save_metrics=False):
     if save_metrics:
         trainer.save_metrics("eval", metrics)
     logger.info("metrics keys: {}".format(metrics.keys()))
-    print('Batch size = %d', batch_size)
+    print('Batch size = %d' % batch_size)
     print("Finally Eval {} Accuracy: {}".format(metric_name, metrics.get(metric_name)))
-    print("Latency: %.3f ms", (evalTime / samples * 1000))
+    print("Latency: %.3f ms" % (evalTime / samples * 1000))
     print("Throughput: {} samples/sec".format(samples / evalTime))
     return metrics.get(metric_name)
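For reference, a minimal sketch of the bug fixed above, using only built-in Python behavior: unlike logging calls such as logger.info("...%d", n), print() does not apply %-formatting to extra arguments; it just prints them side by side, so the old code emitted a literal %d.

    batch_size = 8  # example value
    print('Batch size = %d', batch_size)   # old code: prints "Batch size = %d 8"
    print('Batch size = %d' % batch_size)  # fixed: prints "Batch size = 8"

The same two-line fix is applied to the ipex run_qa.py and both run_glue.py files below.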

examples/pytorch/nlp/huggingface_models/question-answering/quantization/ptq_static/ipex/run_qa.py

Lines changed: 2 additions & 2 deletions
@@ -620,9 +620,9 @@ def take_eval_steps(model, trainer, metric_name, save_metrics=False):
     if save_metrics:
         trainer.save_metrics("eval", metrics)
     logger.info("metrics keys: {}".format(metrics.keys()))
-    print('Batch size = %d', batch_size)
+    print('Batch size = %d' % batch_size)
     print("Finally Eval {} Accuracy: {}".format(metric_name, metrics.get(metric_name)))
-    print("Latency: %.3f ms", (evalTime / samples * 1000))
+    print("Latency: %.3f ms" % (evalTime / samples * 1000))
     print("Throughput: {} samples/sec".format(samples / evalTime))
     return metrics.get(metric_name)

examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_dynamic/eager/run_benchmark.sh

Lines changed: 2 additions & 1 deletion
@@ -103,13 +103,14 @@ function run_benchmark {
         model_name_or_path=$input_model
     fi
 
+    extra_cmd='--model_name_or_path '${input_model}
     if [[ ${int8} == "true" ]]; then
+        extra_cmd='--model_name_or_path '${tuned_checkpoint}
         extra_cmd=$extra_cmd" --int8"
     fi
     echo $extra_cmd
 
     python -u run_glue_tune.py \
-        --model_name_or_path ${tuned_checkpoint} \
         --task_name ${TASK_NAME} \
         --do_eval \
         --max_seq_length ${MAX_SEQ_LENGTH} \
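This hunk is the fp32 loading fix: the script used to hard-code --model_name_or_path ${tuned_checkpoint} in the python invocation, so a plain fp32 benchmark run tried to load the tuned int8 checkpoint instead of the original model. --model_name_or_path now defaults to ${input_model} and is overridden to ${tuned_checkpoint} only when int8 is true. The remaining run_benchmark.sh diffs below apply the same pattern.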

examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_dynamic/fx/run_benchmark.sh

Lines changed: 2 additions & 1 deletion
@@ -71,13 +71,14 @@ function run_benchmark {
 
     fi
 
+    extra_cmd='--model_name_or_path '${input_model}
     if [[ ${int8} == "true" ]]; then
+        extra_cmd='--model_name_or_path '${tuned_checkpoint}
         extra_cmd=$extra_cmd" --int8"
     fi
     echo $extra_cmd
 
     python -u run_glue.py \
-        --model_name_or_path ${tuned_checkpoint} \
         --task_name ${TASK_NAME} \
         --do_eval \
         --max_seq_length ${MAX_SEQ_LENGTH} \

examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_dynamic/fx/run_glue.py

Lines changed: 2 additions & 2 deletions
@@ -486,9 +486,9 @@ def take_eval_steps(model, trainer, save_metrics=False):
     for key in bert_task_acc_keys:
         if key in metrics.keys():
             throughput = metrics.get("eval_samples_per_second")
-            print('Batch size = %d', batch_size)
+            print('Batch size = %d' % batch_size)
             print("Finally Eval {} Accuracy: {}".format(key, metrics[key]))
-            print("Latency: %.3f ms", (1000 / throughput))
+            print("Latency: %.3f ms" % (1000 / throughput))
             print("Throughput: {} samples/sec".format(throughput))
             return metrics[key]
     assert False, "No metric returned, Please check inference metric!"
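Here throughput comes from the Hugging Face Trainer's eval_samples_per_second metric, so 1000 / throughput is the per-sample latency in milliseconds. A quick worked check with an assumed value:

    throughput = 250.0                               # assumed eval_samples_per_second
    print("Latency: %.3f ms" % (1000 / throughput))  # prints "Latency: 4.000 ms"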

examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_static/fx/run_benchmark.sh

Lines changed: 2 additions & 1 deletion
@@ -100,13 +100,14 @@ function run_benchmark {
         model_name_or_path=${input_model}
     fi
 
+    extra_cmd='--model_name_or_path '${input_model}
     if [[ ${int8} == "true" ]]; then
+        extra_cmd='--model_name_or_path '${tuned_checkpoint}
         extra_cmd=$extra_cmd" --int8"
     fi
     echo $extra_cmd
 
     python -u run_glue.py \
-        --model_name_or_path ${tuned_checkpoint} \
         --task_name ${TASK_NAME} \
         --do_eval \
         --max_seq_length ${MAX_SEQ_LENGTH} \

examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_static/fx/run_glue.py

Lines changed: 2 additions & 2 deletions
@@ -486,9 +486,9 @@ def take_eval_steps(model, trainer, save_metrics=False):
     for key in bert_task_acc_keys:
         if key in metrics.keys():
             throughput = metrics.get("eval_samples_per_second")
-            print('Batch size = %d', batch_size)
+            print('Batch size = %d' % batch_size)
             print("Finally Eval {} Accuracy: {}".format(key, metrics[key]))
-            print("Latency: %.3f ms", (1000 / throughput))
+            print("Latency: %.3f ms" % (1000 / throughput))
             print("Throughput: {} samples/sec".format(throughput))
             return metrics[key]
     assert False, "No metric returned, Please check inference metric!"

examples/pytorch/nlp/huggingface_models/text-classification/quantization/qat/fx/README.md

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ or
 or
 
 python run_glue_tune.py \
-    --model_name_or_path ${tuned_checkpoint} \
+    --model_name_or_path ${input_model}/${tuned_checkpoint} \
     --task_name ${task_name} \
     --do_train \
     --do_eval \
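A hedged reading of this README change: the QAT example appears to save the tuned checkpoint under the input model directory, so the documented command now points at ${input_model}/${tuned_checkpoint} rather than ${tuned_checkpoint} alone.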

examples/pytorch/nlp/huggingface_models/text-classification/quantization/qat/fx/run_benchmark.sh

Lines changed: 4 additions & 2 deletions
@@ -58,12 +58,13 @@ function run_benchmark {
         mode_cmd="--benchmark "
     fi
 
+    extra_cmd='--model_name_or_path '${input_model}
     if [[ ${int8} == "true" ]]; then
-        mode_cmd=$mode_cmd"--int8"
+        extra_cmd='--model_name_or_path '${tuned_checkpoint}
+        extra_cmd=$extra_cmd" --int8"
     fi
 
     python run_glue_tune.py \
-        --model_name_or_path ${tuned_checkpoint} \
         --task_name ${task_name} \
         --do_train \
         --do_eval \
@@ -74,6 +75,7 @@ function run_benchmark {
         --num_train_epochs 3 \
         --metric_for_best_model f1 \
         --output_dir ./output_log --overwrite_output_dir \
+        ${extra_cmd} \
         ${mode_cmd}
 }
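This script needs one extra step beyond the shared pattern: --int8 moves from mode_cmd into extra_cmd alongside the checkpoint selection, and the second hunk threads ${extra_cmd} into the run_glue_tune.py invocation, replacing the hard-coded --model_name_or_path ${tuned_checkpoint} line.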
