@@ -35,6 +35,7 @@ function run_test() {
   run "$5"
 }
 power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 '
+power=' '
 
 # Add your run commands here...
 find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \
@@ -51,37 +52,20 @@ submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenari
 --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \
 --skip_submission_generation=yes --execution_mode=valid $power'
 
-readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \
---model=$model --implementation=$implementation --device=$device --backend=$backend \
---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \
---skip_submission_generation=yes --execution_mode=valid $power'
-
-readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \
---model=$model --implementation=$implementation --device=$device --backend=$backend \
---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \
---skip_submission_generation=yes --execution_mode=valid $power'
 
 # run "$MLC_RUN_CMD"
 run_test "onnxruntime" "200" "reference" "cpu" "$find_performance_cmd"
 run_test "tf" "200" "reference" "cpu" "$find_performance_cmd"
-run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd"
-run_test "tf" "20000" "reference" "cuda" "$find_performance_cmd"
 
 run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd"
 run_test "tf" "100" "reference" "cpu" "$submission_cmd"
 scenario="SingleStream"
 run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc"
 run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc --adr.mlperf-inference-implementation.compressed_dataset=on"
+
+
+run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd"
+run_test "tf" "20000" "reference" "cuda" "$find_performance_cmd"
 run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd"
-scenario="Offline"
-run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario"
-scenario="SingleStream"
-run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario"
-
-run_test "onnxruntime" "100" "reference" "cpu" "$readme_cmd"
-run_test "tf" "100" "reference" "cpu" "$readme_cmd"
-run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream"
-run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream --adr.mlperf-inference-implementation.compressed_dataset=on"
-run_test "onnxruntime" "100" "reference" "cuda" "$readme_cmd --scenario=SingleStream"
-run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=SingleStream"
-run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=Offline"
+run_test "tf" "100" "reference" "cuda" "$submission_cmd"
+
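For context: the first hunk header shows the tail of run_test, whose body ends in run "$5". A minimal sketch of what that wrapper presumably looks like, reconstructed from the five-argument call sites above; only run "$5" is visible in the diff, so the variable names here are illustrative assumptions, not the actual source:

function run_test() {
  # Assumed mapping of positional args, inferred from calls like
  # run_test "onnxruntime" "200" "reference" "cpu" "$find_performance_cmd":
  backend=$1              # onnxruntime, tf, or tflite
  test_query_count=$2     # e.g. 200 on cpu, 10000/20000 on cuda (hypothetical name)
  implementation=$3       # reference or tflite-cpp
  device=$4               # cpu or cuda
  run "$5"                # execute the selected mlcr command string
}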