diff --git a/script/benchmark-program-mlperf/customize.py b/script/benchmark-program-mlperf/customize.py
index 3f92511eb..493505c48 100644
--- a/script/benchmark-program-mlperf/customize.py
+++ b/script/benchmark-program-mlperf/customize.py
@@ -33,9 +33,9 @@ def postprocess(i):
 echo \${MLC_MLPERF_RUN_COUNT} > \${MLC_RUN_DIR}/count.txt;
 if [ \${MLC_MLPERF_RUN_COUNT} -eq 1 ]; then
-export MLC_MLPERF_USER_CONF="${MLC_MLPERF_RANGING_USER_CONF}";
+export MLC_MLPERF_USER_CONF="\${MLC_MLPERF_RANGING_USER_CONF}";
 else
-export MLC_MLPERF_USER_CONF="${MLC_MLPERF_TESTING_USER_CONF}";
+export MLC_MLPERF_USER_CONF="\${MLC_MLPERF_TESTING_USER_CONF}";
 fi
 ;
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index ec1a5fab7..a9f8e3eaa 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -356,13 +356,13 @@ def preprocess(i):
             max_duration_ranging_s * 1000)  # in milliseconds
     if scenario == "MultiStream" or scenario == "SingleStream":
-        if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get(
-                'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]:
-            user_conf += ml_model_name + "." + scenario + \
-                f".max_duration = {max_duration_valid}" + "\n"
-        elif env.get('MLC_MLPERF_INFERENCE_MIN_DURATION', '') != '':
+        if env.get('MLC_MLPERF_INFERENCE_MIN_DURATION', '') != '':
             user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
                 env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
+        elif not is_false(env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes')) and not is_true(env.get(
+                'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')):
+            user_conf += ml_model_name + "." + scenario + \
+                f".max_duration = {max_duration_valid}" + "\n"
         if scenario == "MultiStream":
             user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
                 env.get(
diff --git a/script/install-tensorflow-from-src/meta.yaml b/script/install-tensorflow-from-src/meta.yaml
index 30821bb38..c219d1179 100644
--- a/script/install-tensorflow-from-src/meta.yaml
+++ b/script/install-tensorflow-from-src/meta.yaml
@@ -344,3 +344,51 @@ versions:
       version: 5.0.0
     env:
       MLC_GIT_CHECKOUT: v2.9.0
+  v2.12.0:
+    deps:
+    - names:
+      - python
+      - python3
+      tags: get,python
+      version_max: 3.11.999
+      version_max_usable: 3.11.12
+      version_min: 3.8.0
+    - tags: get,gcc
+      version_max: '12.9'
+      version_min: '9'
+    - tags: get,bazel
+      version: 5.3.0
+    env:
+      MLC_GIT_CHECKOUT: v2.12.0
+  v2.15.0:
+    deps:
+    - names:
+      - python
+      - python3
+      tags: get,python
+      version_max: 3.11.999
+      version_max_usable: 3.11.12
+      version_min: 3.9.0
+    - tags: get,llvm
+      version_max: '16.9.999'
+      version_min: '16.0.0'
+    - tags: get,bazel
+      version: 6.1.0
+    env:
+      MLC_GIT_CHECKOUT: v2.15.0
+  v2.18.0:
+    deps:
+    - names:
+      - python
+      - python3
+      tags: get,python
+      version_max: 3.12.999
+      version_max_usable: 3.12.12
+      version_min: 3.9.0
+    - tags: get,llvm
+      version_max: '17.9.999'
+      version_min: '17.0.6'
+    - tags: get,bazel
+      version: 6.5.0
+    env:
+      MLC_GIT_CHECKOUT: v2.18.0
diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml
index bf8b741a1..b19362ecc 100644
--- a/script/run-mlperf-inference-app/meta.yaml
+++ b/script/run-mlperf-inference-app/meta.yaml
@@ -67,6 +67,7 @@ input_mapping:
   imagenet_path: IMAGENET_PATH
   implementation: MLC_MLPERF_IMPLEMENTATION
   lang: MLC_MLPERF_IMPLEMENTATION
+  min_duration: MLC_MLPERF_INFERENCE_MIN_DURATION
   min_query_count: MLC_MLPERF_INFERENCE_MIN_QUERY_COUNT
   max_query_count: MLC_MLPERF_INFERENCE_MAX_QUERY_COUNT
   mode: MLC_MLPERF_LOADGEN_MODE
diff --git a/script/run-mlperf-power-server/power-server.conf b/script/run-mlperf-power-server/power-server.conf
new file mode 100644
index 000000000..70797bd62
--- /dev/null
+++ b/script/run-mlperf-power-server/power-server.conf
@@ -0,0 +1,19 @@
+[server]
+ntpserver = time.google.com
+listen = 0.0.0.0 4950
+
+[ptd]
+ptd = C:\Users\arjun\CM\repos\local\cache\5a0a52d578724774\repo\PTD\binaries\ptd-windows-x86.exe
+analyzerCount = 2
+
+[analyzer1]
+interfaceflag = -y
+devicetype = 49
+deviceport = C3YD21068E
+networkport = 8888
+
+[analyzer2]
+interfaceflag = -g
+devicetype = 8
+deviceport = 20
+networkport = 8889