From 59ed8667a7011633f1490bc4c0980bf6b66ee24a Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sat, 15 Feb 2025 17:36:36 +0530
Subject: [PATCH 01/40] Fix a bug in reuse_existing_container

---
 automation/script/docker.py                  | 2 +-
 automation/script/docker_utils.py            | 1 -
 script/get-cuda-devices/detect.sh            | 2 +-
 script/run-docker-container/customize.py     | 4 +++-
 script/run-mlperf-inference-app/customize.py | 2 --
 5 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/automation/script/docker.py b/automation/script/docker.py
index bebee59e1..1e762e348 100644
--- a/automation/script/docker.py
+++ b/automation/script/docker.py
@@ -371,7 +371,7 @@ def docker_run(self_module, i):

    # Execute the Docker container
    mlc_docker_input = {
-        'action': 'run', 'automation': 'script', 'tags': 'run,docker,container',
+        'action': 'run', 'target': 'script', 'tags': 'run,docker,container',
        'rebuild': rebuild_docker_image,
        'env': env, 'mounts': mounts,
        'script_tags': i.get('tags'), 'run_cmd': final_run_cmd, 'v': verbose,
diff --git a/automation/script/docker_utils.py b/automation/script/docker_utils.py
index 074c8312c..621db878a 100644
--- a/automation/script/docker_utils.py
+++ b/automation/script/docker_utils.py
@@ -377,7 +377,6 @@ def get_docker_default(key):
        "use_host_user_id": True,
        "use_host_group_id": True,
        "keep_detached": False,
-        "reuse_existing": True
    }
    if key in defaults:
        return defaults[key]
diff --git a/script/get-cuda-devices/detect.sh b/script/get-cuda-devices/detect.sh
index 9de8aa64b..95b170267 100644
--- a/script/get-cuda-devices/detect.sh
+++ b/script/get-cuda-devices/detect.sh
@@ -1,4 +1,4 @@
 #!/bin/bash

 ${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect.py
-test $? -eq 0 || exit $?
+test $? -eq 0 || exit 11
diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py
index 41660a001..a0e11c0b5 100644
--- a/script/run-docker-container/customize.py
+++ b/script/run-docker-container/customize.py
@@ -88,6 +88,8 @@ def preprocess(i):
            if existing_container_id:
                print(
                    f"""Not using existing container {existing_container_id} as env['MLC_DOCKER_REUSE_EXISTING_CONTAINER'] is not set""")
+            else:
+                print("No existing container")

    if env.get('MLC_DOCKER_CONTAINER_ID', '') != '':
        del (env['MLC_DOCKER_CONTAINER_ID'])  # not valid ID
@@ -120,7 +122,6 @@ def preprocess(i):
    # elif recreate_image == "yes":
    #     env['MLC_DOCKER_IMAGE_RECREATE'] = "no"

-
    return {'return': 0}


@@ -287,6 +288,7 @@ def postprocess(i):
            print(f"Error Output: {e.stderr}")
            return {'return': 1, 'error': e.stderr}

+
    docker_out = result.stdout
    # if docker_out != 0:
    #     return {'return': docker_out, 'error': f""{env['MLC_CONTAINER_TOOL']}
diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py
index 885d1d8b3..5c3932a3d 100644
--- a/script/run-mlperf-inference-app/customize.py
+++ b/script/run-mlperf-inference-app/customize.py
@@ -281,8 +281,6 @@ def preprocess(i):

    mlc = i['automation'].action_object

-    # print(ii)
-    # return {'return': 1}
    r = mlc.access(ii)
    if r['return'] > 0:
        return r

From 3e56970ad60537734ff13daba4e026dda15d303b Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Sat, 15 Feb 2025 12:06:54 +0000
Subject: [PATCH 02/40] [Automated Commit] Format Codebase [skip ci]

---
 script/run-docker-container/customize.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py
index a0e11c0b5..a8b92139b 100644
--- a/script/run-docker-container/customize.py
+++ b/script/run-docker-container/customize.py
@@ -288,7 +288,6 @@ def postprocess(i):
            print(f"Error Output: {e.stderr}")
            return {'return': 1, 'error': e.stderr}

-
    docker_out = result.stdout
    # if docker_out != 0:
    #     return {'return': docker_out, 'error': f""{env['MLC_CONTAINER_TOOL']}

From 30b2d06772c23749a78c36f865ff56076e7aefb5 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sat, 15 Feb 2025 17:40:59 +0530
Subject: [PATCH 03/40] Fix dummy measurements file generation

---
 script/generate-mlperf-inference-submission/customize.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py
index 277b52933..d19015178 100644
--- a/script/generate-mlperf-inference-submission/customize.py
+++ b/script/generate-mlperf-inference-submission/customize.py
@@ -552,9 +552,9 @@ def generate_submission(env, state, inp, submission_division):
                            target_measurement_json_path) / "model-info.json"
                        shutil.copy(measurements_json_path, destination)
-                else:
+                elif mode == 'performance':
                    print(
-                        f"Warning: measurements.json file not present, creating a dummy measurements.json in path {measurements_json_path}")
+                        f"Warning: measurements.json file not present from perf run, creating a dummy measurements.json in path {measurements_json_path}. Please update it later.")
                    dummy_measurements_data = {
                        "input_data_types": env['MLC_ML_MODEL_INPUTS_DATA_TYPE'] if env.get('MLC_ML_MODEL_INPUTS_DATA_TYPE') else "TBD",
                        "retraining": env['MLC_ML_MODEL_RETRAINING'] if env.get('MLC_ML_MODEL_RETRAINING') else "TBD",
@@ -563,7 +563,7 @@ def generate_submission(env, state, inp, submission_division):
                        "weight_transformations": env['MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS'] if env.get('MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS') else "TBD"
                    }
                    with open(measurements_json_path, 'w') as json_file:
-                        json.dump(data, json_file, indent=4)
+                        json.dump(dummy_measurements_data, json_file, indent=4)

    files = []
    readme = False

From 829b7319ca6569da23502adc84b15f870a67776f Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Sat, 15 Feb 2025 12:11:23 +0000
Subject: [PATCH 04/40] [Automated Commit] Format Codebase [skip ci]

---
 script/generate-mlperf-inference-submission/customize.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py
index d19015178..5a7258701 100644
--- a/script/generate-mlperf-inference-submission/customize.py
+++ b/script/generate-mlperf-inference-submission/customize.py
@@ -563,7 +563,8 @@ def generate_submission(env, state, inp, submission_division):
                        "weight_transformations": env['MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS'] if env.get('MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS') else "TBD"
                    }
                    with open(measurements_json_path, 'w') as json_file:
-                        json.dump(dummy_measurements_data, json_file, indent=4)
+                        json.dump(
+                            dummy_measurements_data, json_file, indent=4)

    files = []
    readme = False

From bb3427575f29b09bd0b241614bb0b05253ddeda1 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 02:15:31 +0530
Subject: [PATCH 05/40] Prevent errors on get-platform-details

---
 script/get-platform-details/run.sh | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/script/get-platform-details/run.sh b/script/get-platform-details/run.sh
index 843d1cba7..a2ebfd341 100644
--- a/script/get-platform-details/run.sh
+++ b/script/get-platform-details/run.sh
@@ -46,7 +46,7 @@ echo "8. numactl --hardware" >> "${OUTPUT_FILE}"
 if [[ ${MLC_SUDO_USER} == "yes" ]]; then
    echo "${MLC_SUDO} numactl --hardware"
    eval "${MLC_SUDO} numactl --hardware" >> "${OUTPUT_FILE}"
-    test $? -eq 0 || exit $?
+    #test $? -eq 0 || exit $?
 else
    echo "Requires SUDO permission" >> "${OUTPUT_FILE}"
 fi
@@ -86,7 +86,7 @@ echo "15. sysctl" >> "${OUTPUT_FILE}"
 if [[ ${MLC_SUDO_USER} == "yes" ]]; then
    echo "${MLC_SUDO} sysctl -a"
    eval "${MLC_SUDO} sysctl -a" >> "${OUTPUT_FILE}"
-    test $? -eq 0 || exit $?
+    #test $? -eq 0 || exit $?
 else
    echo "Requires SUDO permission" >> "${OUTPUT_FILE}"
 fi
@@ -94,12 +94,12 @@ echo "------------------------------------------------------------" >> "${OUTPUT_FILE}"

 echo "16. /sys/kernel/mm/transparent_hugepage" >> "${OUTPUT_FILE}"
 eval "cat /sys/kernel/mm/transparent_hugepage/enabled" >> "${OUTPUT_FILE}"
-test $? -eq 0 || exit $?
+#test $? -eq 0 || exit $?
 echo "------------------------------------------------------------" >> "${OUTPUT_FILE}"

 echo "17. /sys/kernel/mm/transparent_hugepage/khugepaged" >> "${OUTPUT_FILE}"
 eval "cat /sys/kernel/mm/transparent_hugepage/khugepaged/defrag" >> "${OUTPUT_FILE}"
-test $? -eq 0 || exit $?
+#test $? -eq 0 || exit $?
 echo "------------------------------------------------------------" >> "${OUTPUT_FILE}"

 echo "18. OS release" >> "${OUTPUT_FILE}"
@@ -114,7 +114,7 @@ echo "------------------------------------------------------------" >> "${OUTPUT_FILE}"

 echo "20. /sys/devices/virtual/dmi/id" >> "${OUTPUT_FILE}"
 eval "ls /sys/devices/virtual/dmi/id" >> "${OUTPUT_FILE}"
-test $? -eq 0 || exit $?
+#test $? -eq 0 || exit $?
 echo "------------------------------------------------------------" >> "${OUTPUT_FILE}"

 echo "21. dmidecode" >> "${OUTPUT_FILE}"

From e1293e950fd6dd0c0a0476803e8b6e2ad80c8111 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 11:54:49 +0000
Subject: [PATCH 06/40] Make noinfer-scenario results the default for
 mlperf-inference (#230)

* Make noinfer_scenario results the default

---
 script/app-mlperf-inference/meta.yaml                      | 2 +-
 script/generate-mlperf-inference-submission/customize.py   | 5 ++++-
 script/generate-mlperf-inference-submission/meta.yaml      | 4 ++--
 script/get-ml-model-llama2/meta.yaml                       | 7 +++++++
 script/get-ml-model-llama3/meta.yaml                       | 7 +++++++
 script/preprocess-mlperf-inference-submission/customize.py | 2 ++
 script/preprocess-mlperf-inference-submission/meta.yaml    | 3 +++
 7 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml
index b1abbe02d..f041b8f66 100644
--- a/script/app-mlperf-inference/meta.yaml
+++ b/script/app-mlperf-inference/meta.yaml
@@ -121,7 +121,7 @@ deps:
 posthook_deps:
  - tags: get,mlperf,sut,description #populate system meta information like framework
  - tags: get,platform,details
-    enable_if_any_env:
+    enable_if_env:
      MLC_GET_PLATFORM_DETAILS:
        - yes
    skip_if_env:
diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py
index 5a7258701..7dda1acbb 100644
--- a/script/generate-mlperf-inference-submission/customize.py
+++ b/script/generate-mlperf-inference-submission/customize.py
@@ -131,12 +131,13 @@ def generate_submission(env, state, inp, submission_division):
        system_meta_tmp['system_type'] = env['MLC_MLPERF_SUBMISSION_CATEGORY'].replace(
            "-", ",")

-    duplicate = (
+    '''duplicate = (
        env.get(
            'MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS',
            'no') in [
            "yes",
            "True"])
+    '''

    if division not in ['open', 'closed']:
        return {'return': 1, 'error': '"division" must be "open" or "closed"'}
@@ -361,6 +362,7 @@ def generate_submission(env, state, inp, submission_division):
                compliance_scenario_path = os.path.join(
                    compliance_model_path, scenario)

+            '''
            if duplicate and scenario == 'singlestream':
                if not os.path.exists(os.path.join(
                        result_model_path, "offline")):
                    shutil.copytree(
                        result_scenario_path, os.path.join(
                            result_model_path, "offline"))
                    scenarios.append("offline")
                if not os.path.exists(os.path.join(
                        result_model_path, "multistream")):
                    shutil.copytree(
                        result_scenario_path, os.path.join(
                            result_model_path, "multistream"))
                    scenarios.append("multistream")
+            '''

            modes = [
                f for f in os.listdir(result_scenario_path) if not os.path.isfile(
diff --git a/script/generate-mlperf-inference-submission/meta.yaml b/script/generate-mlperf-inference-submission/meta.yaml
index c249d7243..9448db682 100644
--- a/script/generate-mlperf-inference-submission/meta.yaml
+++ b/script/generate-mlperf-inference-submission/meta.yaml
@@ -67,11 +67,11 @@ input_mapping:
  dashboard_wb_project: MLC_MLPERF_DASHBOARD_WANDB_PROJECT
  device: MLC_MLPERF_DEVICE
  division: MLC_MLPERF_SUBMISSION_DIVISION
-  duplicate: MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS
  extra_checker_args: MLC_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG
  hw_name: MLC_HW_NAME
  hw_notes_extra: MLC_MLPERF_SUT_HW_NOTES_EXTRA
-  infer_scenario_results: MLC_MLPERF_DUPLICATE_SCENARIO_RESULTS
+  noinfer_scenario_results: MLC_MLPERF_NOINFER_SCENARIO_RESULTS
+  noinfer_low_accuracy_results: MLC_MLPERF_NOINFER_LOW_ACCURACY_RESULTS
  power_settings_file: MLC_MLPERF_POWER_SETTINGS_FILE_PATH
  preprocess: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR
  preprocess_submission: MLC_RUN_MLPERF_SUBMISSION_PREPROCESSOR
diff --git a/script/get-ml-model-llama2/meta.yaml b/script/get-ml-model-llama2/meta.yaml
index ed7477c04..fd9b43e68 100644
--- a/script/get-ml-model-llama2/meta.yaml
+++ b/script/get-ml-model-llama2/meta.yaml
@@ -92,6 +92,13 @@ variations:
      prehook_deps:
      - tags: get,rclone-config,_mlperf-llama2
        force_cache: yes
+        enable_if_env:
+          MLC_TMP_REQUIRE_DOWNLOAD:
+            - yes
+      - tags: get,rclone
+        enable_if_env:
+          MLC_TMP_REQUIRE_DOWNLOAD:
+            - yes
    hf:
      group: download-source
      env:
diff --git a/script/get-ml-model-llama3/meta.yaml b/script/get-ml-model-llama3/meta.yaml
index 673f34c49..2edbeaa3b 100644
--- a/script/get-ml-model-llama3/meta.yaml
+++ b/script/get-ml-model-llama3/meta.yaml
@@ -54,6 +54,13 @@ variations:
      prehook_deps:
      - tags: get,rclone-config,_mlperf-llama3-1
        force_cache: true
+        enable_if_env:
+          MLC_TMP_REQUIRE_DOWNLOAD:
+            - yes
+      - tags: get,rclone
+        enable_if_env:
+          MLC_TMP_REQUIRE_DOWNLOAD:
+            - yes
      env:
        MLC_DOWNLOAD_SRC: mlcommons
    hf:
diff --git a/script/preprocess-mlperf-inference-submission/customize.py b/script/preprocess-mlperf-inference-submission/customize.py
index 58f38524b..a394b4446 100644
--- a/script/preprocess-mlperf-inference-submission/customize.py
+++ b/script/preprocess-mlperf-inference-submission/customize.py
@@ -35,6 +35,8 @@ def preprocess(i):
    extra_args = []
    if is_true(env.get('MLC_MLPERF_NOINFER_LOW_ACCURACY_RESULTS')):
        extra_args.append("--noinfer-low-accuracy-results")
+    if is_true(env.get('MLC_MLPERF_NOINFER_SCENARIO_RESULTS')):
+        extra_args.append("--noinfer-scenario-results")
    if is_true(env.get('MLC_MLPERF_NODELETE_EMPTY_DIRS')):
        extra_args.append("--nodelete-empty-dirs")
    if is_true(env.get('MLC_MLPERF_NOMOVE_FAILED_TO_OPEN')):
diff --git a/script/preprocess-mlperf-inference-submission/meta.yaml b/script/preprocess-mlperf-inference-submission/meta.yaml
index 4e5d3023e..a10167acc 100644
--- a/script/preprocess-mlperf-inference-submission/meta.yaml
+++ b/script/preprocess-mlperf-inference-submission/meta.yaml
@@ -25,6 +25,9 @@ input_mapping:
  version: MLC_MLPERF_SUBMISSION_CHECKER_VERSION
  submitter: MLC_MLPERF_SUBMITTER
  submission_preprocessor_args: MLC_MLPERF_PREPROCESS_SUBMISSION_EXTRA_ARGS
+default_env:
+  MLC_MLPERF_NOINFER_LOW_ACCURACY_RESULTS: True
+  MLC_MLPERF_NOINFER_SCENARIO_RESULTS: True
 tags:
 - run
 - mlc

From e98137ebbf4a43eef6c2f13de79e47324f6346f9 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 17:40:06 +0000
Subject: [PATCH 07/40] Use inference dev branch for submission preprocess
 (#231)

---
 script/preprocess-mlperf-inference-submission/meta.yaml   | 3 ++-
 script/run-mlperf-inference-mobilenet-models/customize.py | 6 +++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/script/preprocess-mlperf-inference-submission/meta.yaml b/script/preprocess-mlperf-inference-submission/meta.yaml
index a10167acc..5381b36dd 100644
--- a/script/preprocess-mlperf-inference-submission/meta.yaml
+++ b/script/preprocess-mlperf-inference-submission/meta.yaml
@@ -12,7 +12,8 @@ deps:
 - names:
  - inference-src
  - submission-checker-src
-  tags: get,mlcommons,inference,src
+  tags: get,mlcommons,inference,src,_branch.dev
+  version: custom
 - names:
  - get-mlperf-submission-dir
  skip_if_env:
diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py
index 171b6e17c..6641448e2 100644
--- a/script/run-mlperf-inference-mobilenet-models/customize.py
+++ b/script/run-mlperf-inference-mobilenet-models/customize.py
@@ -134,8 +134,8 @@ def preprocess(i):

                    mlc_input = {
                        'action': 'run',
-                        'automation': 'script',
-                        'tags': f'generate-run-cmds,mlperf,inference,{var}',
+                        'target': 'script',
+                        'tags': f'run-mlperf,mlperf,inference,{var}',
                        'quiet': True,
                        'env': env,
                        'input': inp,
@@ -200,7 +200,7 @@ def preprocess(i):
                'tags': 'get,preprocessed,dataset,_for.mobilenet',
                'quiet': True,
                'v': verbose,
-                'f': 'True'
+                'f': True
            }
            r = mlc.access(clean_input)
            # if r['return'] > 0:

From b1272b291ef7b44a92549b07b685a8b31db886a5 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 16 Feb 2025 22:57:13 +0000
Subject: [PATCH 08/40] Fix mlcr usage in docs and actions (#232)

* Support armnn download on macos

* Fix SW/HW notes

* Fix commands with mlcr

---
 ...t-amd-mlperf-inference-implementations.yml |  4 +--
 .../test-image-classification-onnx.yml        |  2 +-
 ...intel-mlperf-inference-implementations.yml |  4 +--
 .../workflows/test-mlc-script-features.yml    | 26 ++++++++--------
 .../test-mlperf-inference-abtf-poc.yml        |  2 +-
 ...bert-deepsparse-tf-onnxruntime-pytorch.yml |  6 ++--
 .../workflows/test-mlperf-inference-dlrm.yml  |  4 +--
 .../workflows/test-mlperf-inference-gptj.yml  |  6 ++--
 .../test-mlperf-inference-llama2.yml          |  6 ++--
 .../test-mlperf-inference-mixtral.yml         |  6 ++--
 ...lperf-inference-mlcommons-cpp-resnet50.yml |  6 ++--
 .../test-mlperf-inference-resnet50.yml        |  6 ++--
 .../test-mlperf-inference-retinanet.yml       |  6 ++--
 .../workflows/test-mlperf-inference-rgat.yml  |  4 +--
 .../workflows/test-mlperf-inference-rnnt.yml  |  2 +-
 .../workflows/test-mlperf-inference-sdxl.yaml |  6 ++--
 .../test-mlperf-inference-tvm-resnet50.yml    |  4 +--
 ...adgen-onnx-huggingface-bert-fp32-squad.yml |  2 +-
 ...vidia-mlperf-inference-implementations.yml |  8 ++---
 .../workflows/test-qaic-compute-sdk-build.yml |  4 +--
 .github/workflows/test-qaic-software-kit.yml  |  4 +--
 .github/workflows/test-scc24-sdxl.yaml        | 16 +++++-----
 .../README_aws_dl2q.24xlarge.md               | 16 +++++-----
 script/app-mlperf-inference/meta.yaml         |  2 +-
 .../run-template.sh                           | 18 +++++------
 script/benchmark-program/customize.py         |  2 +-
 script/extract-file/customize.py              |  2 +-
 .../customize.py                              |  8 ++---
 script/get-lib-armnn/customize.py             |  2 +-
 script/get-platform-details/README-EXTRA.md   |  2 +-
 script/get-rocm-devices/README.md             |  2 +-
 .../run-all-mlperf-models/run-bert-macos.sh   | 10 +++----
 script/run-all-mlperf-models/run-bert.sh      | 10 +++----
 .../run-cpp-implementation.sh                 | 30 +++++++++----------
 .../run-mobilenet-models.sh                   | 12 ++++----
 .../run-all-mlperf-models/run-nvidia-4090.sh  |  4 +--
 .../run-all-mlperf-models/run-nvidia-a100.sh  |  4 +--
 script/run-all-mlperf-models/run-nvidia-t4.sh |  4 +--
 .../run-all-mlperf-models/run-pruned-bert.sh  |  4 +--
 .../run-reference-models.sh                   | 20 ++++++------
 .../run-resnet50-macos.sh                     | 10 +++----
 script/run-all-mlperf-models/run-resnet50.sh  | 10 +++----
 script/run-all-mlperf-models/run-retinanet-sh | 10 +++----
 script/run-all-mlperf-models/template.sh      | 10 +++----
 script/run-docker-container/customize.py      |  2 +-
 .../meta.yaml                                 |  2 +-
 script/run-terraform/README-about.md          |  2 +-
 47 files changed, 164 insertions(+), 168 deletions(-)

diff --git a/.github/workflows/test-amd-mlperf-inference-implementations.yml b/.github/workflows/test-amd-mlperf-inference-implementations.yml
index 4c4b6f749..bc01ad20c 100644
--- a/.github/workflows/test-amd-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-amd-mlperf-inference-implementations.yml
@@ -22,5 +22,5 @@ jobs:
          export MLC_REPOS=$HOME/GH_MLC
          pip install --upgrade mlc-scripts
          mlc pull repo
          mlcr run-mlperf,inference,_all-scenarios,_full,_r4.1-dev --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=amd --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=rocm --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet --docker_skip_run_cmd=yes
          # mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=dev --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c
diff --git a/.github/workflows/test-image-classification-onnx.yml b/.github/workflows/test-image-classification-onnx.yml
index 4edbb61b6..121c111ef 100644
--- a/.github/workflows/test-image-classification-onnx.yml
+++ b/.github/workflows/test-image-classification-onnx.yml
@@ -38,4 +38,4 @@ jobs:
          mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
      - name: Test image classification with ONNX
        run: |
          mlcr python,app,image-classification,onnx --quiet
diff --git a/.github/workflows/test-intel-mlperf-inference-implementations.yml b/.github/workflows/test-intel-mlperf-inference-implementations.yml
index 0041f9762..9e4d03e26 100644
--- a/.github/workflows/test-intel-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-intel-mlperf-inference-implementations.yml
@@ -22,5 +22,5 @@ jobs:
          export MLC_REPOS=$HOME/GH_MLC
          pip install --upgrade mlc-scripts
          pip install tabulate
          mlcr run-mlperf,inference,_all-scenarios,_submission,_full,_r4.1-dev --preprocess_submission=yes --execution_mode=valid --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=IntelSPR.24c --implementation=intel --backend=pytorch --category=datacenter --division=open --scenario=Offline --docker_dt=yes --docker_it=no --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cpu --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean --docker --quiet
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on SPR.24c" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=IntelSPR.24c
diff --git a/.github/workflows/test-mlc-script-features.yml b/.github/workflows/test-mlc-script-features.yml
index bca961c1b..05d62e2ae 100644
--- a/.github/workflows/test-mlc-script-features.yml
+++ b/.github/workflows/test-mlc-script-features.yml
@@ -35,12 +35,12 @@ jobs:

      - name: Test Python venv
        run: |
          mlcr install,python-venv --name=test --quiet
          mlc search cache --tags=get,python,virtual,name-test --quiet

      - name: Test variations
        run: |
          mlcr get,dataset,preprocessed,imagenet,_NHWC --quiet
          mlc search cache --tags=get,dataset,preprocessed,imagenet,-_NCHW
          mlc search cache --tags=get,dataset,preprocessed,imagenet,-_NHWC

      - name: Test versions
        continue-on-error: true
        if: runner.os == 'linux'
        run: |
          mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet
          test $? -eq 0 || exit $?
          mlcr get,generic-python-lib,_package.scipy --version=1.9.2 --quiet
          test $? -eq 0 || exit $?
          # Need to add find cache here
          # mlcr get,generic-python-lib,_package.scipy --version=1.9.3 --quiet --only_execute_from_cache=True
          # test $? -eq 0 || exit 0

      - name: Test python install from src
        run: |
          mlcr python,src,install,_shared --version=3.9.10 --quiet
          mlc search cache --tags=python,src,install,_shared,version-3.9.10

  test_docker:
    runs-on: ubuntu-latest

      - name: Run docker container from dockerhub on linux
        run: |
          mlcr run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=cm-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=cknowledge --quiet

      - name: Run docker container locally on linux
        run: |
          mlcr run,docker,container --adr.compiler.tags=gcc --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --image_name=mlc-script-app-image-classification-onnx-py --env.MLC_DOCKER_RUN_SCRIPT_TAGS=app,image-classification,onnx,python --env.MLC_DOCKER_IMAGE_BASE=ubuntu:22.04 --env.MLC_DOCKER_IMAGE_REPO=local --quiet

  test_mlperf_retinanet_cpp_venv:
    runs-on: ubuntu-latest

      - name: Run MLPerf Inference Retinanet with native and virtual Python
        run: |
          mlcr app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=accuracy --test_query_count=10 --rerun --quiet

          mlcr app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --scenario=Offline --mode=performance --test_query_count=10 --rerun --quiet

          mlcr install,python-venv --version=3.10.8 --name=mlperf --quiet

          export MLC_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"

          mlcr --tags=run,mlperf,inference,_submission,_short --adr.python.version_min=3.8 --adr.compiler.tags=gcc --adr.openimages-preprocessed.tags=_50 --submitter=MLCommons --implementation=cpp --hw_name=default --model=retinanet --backend=onnxruntime --device=cpu --scenario=Offline --quiet

      # Step for Linux/MacOS
      - name: Randomly Execute Step (Linux/MacOS)
@@ -160,4 +160,4 @@ jobs:
          git config --global credential.https://github.com.helper "!gh auth git-credential"
          git config --global credential.https://gist.github.com.helper ""
          git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-mlperf-inference-abtf-poc.yml b/.github/workflows/test-mlperf-inference-abtf-poc.yml
index cc2ec9868..fadea97da 100644
--- a/.github/workflows/test-mlperf-inference-abtf-poc.yml
+++ b/.github/workflows/test-mlperf-inference-abtf-poc.yml
@@ -114,4 +114,4 @@ jobs:

      - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on ${{ matrix.os }}
        run: |
          mlcr run-abtf,inference,_poc-demo --test_query_count=2 --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet ${{ matrix.extra-args }} ${{ matrix.docker }} -v
diff --git a/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
index 2a89dbe6f..73f0d4adb 100644
--- a/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
+++ b/.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml
@@ -43,11 +43,11 @@ jobs:
      - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }}
        if: matrix.os == 'windows-latest'
        run: |
          mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet
      - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }}
        if: matrix.os != 'windows-latest'
        run: |
          mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet
      - name: Randomly Execute Step
        id: random-check
        run: |
@@ -77,4 +77,4 @@ jobs:
          git config --global credential.https://github.com.helper "!gh auth git-credential"
          git config --global credential.https://gist.github.com.helper ""
          git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-mlperf-inference-dlrm.yml b/.github/workflows/test-mlperf-inference-dlrm.yml
index 13bf2dbcc..c1a48a4b3 100644
--- a/.github/workflows/test-mlperf-inference-dlrm.yml
+++ b/.github/workflows/test-mlperf-inference-dlrm.yml
@@ -24,7 +24,7 @@ jobs:
          source gh_action/bin/activate
          export MLC_REPOS=$HOME/GH_MLC
          python3 -m pip install mlperf
          mlcr run-mlperf,inference,_performance-only --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --model=dlrm-v2-99 --implementation=reference --backend=pytorch --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --clean

  build_intel:
    if: github.repository_owner == 'gateoverflow_off'
@@ -44,4 +44,4 @@ jobs:
          export MLC_REPOS=$HOME/GH_MLC
          python3 -m pip install mlperf
          mlc pull repo
          mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=dlrm-v2-99 --implementation=intel --batch_size=1 --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean
diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml
index 341e2e818..346947611 100644
--- a/.github/workflows/test-mlperf-inference-gptj.yml
+++ b/.github/workflows/test-mlperf-inference-gptj.yml
@@ -5,7 +5,7 @@ name: MLPerf inference GPT-J

on:
  schedule:
-    - cron: "15 19 * * *"
+    - cron: "15 19 1 * *"

jobs:
  build:
@@ -26,6 +26,6 @@ jobs:
          export MLC_REPOS=$HOME/GH_MLC
          python3 -m pip install --upgrade mlc-scripts
          mlc pull repo
          mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --pull_changes=yes --pull_inference_changes=yes --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --get_platform_details=yes --implementation=reference --clean
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions
diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml
index 70e4e4909..acf896b88 100644
--- a/.github/workflows/test-mlperf-inference-llama2.yml
+++ b/.github/workflows/test-mlperf-inference-llama2.yml
@@ -5,7 +5,7 @@ name: MLPerf inference LLAMA2-70B

on:
  schedule:
-    - cron: "59 04 * * *"
+    - cron: "59 04 1 * *"

jobs:
  build_reference:
@@ -31,5 +31,5 @@ jobs:
          pip install "huggingface_hub[cli]"
          git config --global credential.helper store
          huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential
          mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --precision=${{ matrix.precision }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=0.001 --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --clean
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions" --quiet --submission_dir=$HOME/gh_action_submissions
diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml
index eb47f853f..e091d2fe7 100644
--- a/.github/workflows/test-mlperf-inference-mixtral.yml
+++ b/.github/workflows/test-mlperf-inference-mixtral.yml
@@ -2,7 +2,7 @@ name: MLPerf inference MIXTRAL-8x7B

on:
  schedule:
-    - cron: "59 23 * * */5" # 30th minute and 20th hour => 20:30 UTC => 2 AM IST
+    - cron: "59 23 1 * */5" # 30th minute and 20th hour => 20:30 UTC => 2 AM IST

jobs:
  build_reference:
@@ -28,5 +28,5 @@ jobs:
          git config --global credential.helper store
          huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential
          mlc pull repo
          mlcr run-mlperf,inference,_submission,_short --adr.inference-src.tags=_branch.dev --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --model=mixtral-8x7b --implementation=reference --batch_size=1 --precision=${{ matrix.precision }} --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --docker --quiet --test_query_count=3 --target_qps=0.001 --clean --env.MLC_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --env.MLC_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST=yes --adr.openorca-mbxp-gsm8k-combined-preprocessed.tags=_size.1
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - GO-phoenix" --quiet --submission_dir=$HOME/gh_action_submissions
diff --git a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
index f4ed3f079..737132572 100644
--- a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
@@ -41,11 +41,11 @@ jobs:
      - name: Test MLPerf Inference MLCommons C++ ResNet50 on ${{ matrix.os }}
        if: matrix.os == 'windows-latest'
        run: |
          mlcr app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --adr.loadgen.tags=_from-pip --pip_loadgen=yes -v --quiet
      - name: Test MLPerf Inference MLCommons C++ ResNet50 on ${{ matrix.os }}
        if: matrix.os != 'windows-latest'
        run: |
          mlcr app,mlperf,inference,mlcommons,cpp --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} -v --quiet
      - name: Randomly Execute Step
        id: random-check
        run: |
@@ -77,4 +77,4 @@ jobs:
          git config --global credential.https://github.com.helper "!gh auth git-credential"
          git config --global credential.https://gist.github.com.helper ""
          git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=dev --commit_message="Results from MLCommons C++ ResNet50 GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml
index 4bfbe06ad..9bc5db424 100644
--- a/.github/workflows/test-mlperf-inference-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-resnet50.yml
@@ -53,11 +53,11 @@ jobs:
      - name: Test MLPerf Inference ResNet50 (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
      - name: Test MLPerf Inference ResNet50 (Linux/macOS)
        if: matrix.os != 'windows-latest'
        run: |
          mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name="gh_${{ matrix.os }} x86" --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
      # Step for Linux/MacOS
      - name: Randomly Execute Step (Linux/MacOS)
        if: runner.os != 'Windows'
@@ -101,5 +101,5 @@ jobs:
          git config --global credential.https://github.com.helper "!gh auth git-credential"
          git config --global credential.https://gist.github.com.helper ""
          git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet

diff --git a/.github/workflows/test-mlperf-inference-retinanet.yml b/.github/workflows/test-mlperf-inference-retinanet.yml
index c1777beae..40e749831 100644
--- a/.github/workflows/test-mlperf-inference-retinanet.yml
+++ b/.github/workflows/test-mlperf-inference-retinanet.yml
@@ -47,11 +47,11 @@ jobs:
      - name: Test MLPerf Inference Retinanet using ${{ matrix.backend }} on ${{ matrix.os }}
        if: matrix.os == 'windows-latest'
        run: |
          mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }} --model=retinanet --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1
      - name: Test MLPerf Inference Retinanet using ${{ matrix.backend }} on ${{ matrix.os }}
        if: matrix.os != 'windows-latest'
        run: |
          mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --hw_name=gh_${{ matrix.os }}_x86 --model=retinanet --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --quiet -v --target_qps=1

      # Step for Linux/MacOS
      - name: Randomly Execute Step (Linux/MacOS)
@@ -96,4 +96,4 @@ jobs:
          git config --global credential.https://github.com.helper "!gh auth git-credential"
          git config --global credential.https://gist.github.com.helper ""
          git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-mlperf-inference-rgat.yml b/.github/workflows/test-mlperf-inference-rgat.yml
index 026c64886..f8b0e6116 100644
--- a/.github/workflows/test-mlperf-inference-rgat.yml
+++ b/.github/workflows/test-mlperf-inference-rgat.yml
@@ -35,7 +35,7 @@ jobs:
          mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
      - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }}
        run: |
          mlcr run,mlperf,inference,generate-run-cmds,_submission,_short --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1

      - name: Retrieve secrets from Keeper
        id: ksecrets
@@ -55,4 +55,4 @@ jobs:
          git config --global credential.https://github.com.helper "!gh auth git-credential"
          git config --global credential.https://gist.github.com.helper ""
          git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-mlperf-inference-rnnt.yml b/.github/workflows/test-mlperf-inference-rnnt.yml
index 89ec6e4e2..3dfba6938 100644
--- a/.github/workflows/test-mlperf-inference-rnnt.yml
+++ b/.github/workflows/test-mlperf-inference-rnnt.yml
@@ -37,4 +37,4 @@ jobs:
          mlcr --quiet --tags=get,sys-utils-cm
      - name: Test MLPerf Inference RNNT
        run: |
          mlcr run,mlperf,inference,generate-run-cmds,_performance-only --submitter="cTuning" --model=rnnt --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=5 -v --quiet
diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml
index 2e287a0be..b76abb2ee 100644
--- a/.github/workflows/test-mlperf-inference-sdxl.yaml
+++ b/.github/workflows/test-mlperf-inference-sdxl.yaml
@@ -1,7 +1,7 @@ name: MLPerf inference SDXL
on:
  schedule:
-    - cron: "19 17 * * *"
+    - cron: "19 17 1 * *"

jobs:
  build_reference:
@@ -21,5 +21,5 @@ jobs:
          export MLC_REPOS=$HOME/GH_MLC
          python3 -m pip install mlc-scripts
          mlc pull repo
          mlcr run-mlperf,inference,_submission,_short --submitter="MLCommons" --pull_changes=yes --pull_inference_changes=yes --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --quiet --docker_it=no --docker_mlc_repo=gateoverflow@mlperf-automations --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions
diff --git a/.github/workflows/test-mlperf-inference-tvm-resnet50.yml b/.github/workflows/test-mlperf-inference-tvm-resnet50.yml
index 616f67db2..b2ae35d04 100644
--- a/.github/workflows/test-mlperf-inference-tvm-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-tvm-resnet50.yml
@@ -35,7 +35,7 @@ jobs:
          mlcr --quiet --tags=get,sys-utils-cm
      - name: Test MLC Tutorial TVM
        run: |
          mlcr run-mlperf,inference,_submission,_short --adr.python.name=mlperf --adr.python.version_min=3.8 --submitter=MLCommons --implementation=python --hw_name=gh_ubuntu-latest --model=resnet50 --backend=tvm-onnx --device=cpu --scenario=Offline --mode=accuracy --test_query_count=5 --clean --quiet ${{ matrix.extra-options }}
      - name: Randomly Execute Step
        id: random-check
        run: |
@@ -67,4 +67,4 @@ jobs:
          git config --global credential.https://github.com.helper "!gh auth git-credential"
          git config --global credential.https://gist.github.com.helper ""
          git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_test_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet
diff --git a/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml b/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml
index 7c9a68d85..cafb93a12 100644
--- a/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml
+++ b/.github/workflows/test-mlperf-loadgen-onnx-huggingface-bert-fp32-squad.yml
@@ -32,4 +32,4 @@ jobs:
          mlc pull repo ${{ github.event.pull_request.head.repo.html_url }} --branch=${{ github.event.pull_request.head.ref }}
      - name: Test MLPerf loadgen with HuggingFace bert onnx fp32 squad model
        run: |
          mlcr python,app,loadgen-generic,_onnxruntime,_custom,_huggingface,_model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1 --quiet
diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
index 2fac2632c..9b75db22a 100644
--- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
@@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations

on:
  schedule:
-    - cron: "27 11 * * *"
+    - cron: "27 11 1 * *"

jobs:
  run_nvidia:
@@ -58,7 +58,7 @@ jobs:
          pip install --upgrade mlcflow
          mlc pull repo mlcommons@mlperf-automations --branch=dev

          mlcr run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="GATEOverflow" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet
          #mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
          mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH actions on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
diff --git a/.github/workflows/test-qaic-compute-sdk-build.yml b/.github/workflows/test-qaic-compute-sdk-build.yml
index 0dff27cd0..6b3e91356 100644
--- a/.github/workflows/test-qaic-compute-sdk-build.yml
+++ b/.github/workflows/test-qaic-compute-sdk-build.yml
@@ -27,8 +27,8 @@ jobs:
      - name: Install dependencies
        run: |
          pip install mlc-scripts
          mlcr get,sys-utils-cm --quiet

      - name: Test QAIC Compute SDK for compilation
        run: |
          mlcr get,qaic,compute,sdk --adr.llvm.version=${{ matrix.llvm-version }} --quiet
diff --git a/.github/workflows/test-qaic-software-kit.yml b/.github/workflows/test-qaic-software-kit.yml
index 5cbfc0add..64bba66b4 100644
--- a/.github/workflows/test-qaic-software-kit.yml
+++ b/.github/workflows/test-qaic-software-kit.yml
@@ -32,8 +32,8 @@ jobs:
      - name: Pull MLOps repository
        run: |
          pip install mlc-scripts
          mlcr get,sys-utils-mlc --quiet

      - name: Test Software Kit for compilation on Ubuntu 20.04
        run: |
          mlcr get,qaic,software,kit --adr.compiler.tags=${{ matrix.compiler }} --adr.compiler.version=${{ matrix.llvm-version }} --quiet
diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml
index becc3830d..ffe814ba4 100644
--- a/.github/workflows/test-scc24-sdxl.yaml
+++ b/.github/workflows/test-scc24-sdxl.yaml
@@ -28,10 +28,10 @@ jobs:
          pip install --upgrade mlcflow
          pip install tabulate
          mlc pull repo
          mlcr run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --pull_inference_changes=yes --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
          mlcr run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
          mlcr generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
          # mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions

  build_nvidia:
    if: github.repository_owner == 'gateoverflow'
@@ -56,7 +56,7 @@ jobs:
          pip install --upgrade mlcflow
          pip install tabulate
          mlc pull repo
          mlcr run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter
--scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean - mlcr --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean - mlcr --tags=generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results - mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions + mlcr run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --pull_changes=yes --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --pull_changes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --custom_system_nvidia=yes --clean + mlcr run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_mlc_repo=$MLC_DOCKER_REPO --docker_mlc_repo_branch=$MLC_DOCKER_REPO_BRANCH --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --env.MLC_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean + mlcr generate,inference,submission --clean --run-checker --tar=yes --env.MLC_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results + # mlcr push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions diff --git a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md 
b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md index e27a6f3ec..cd19536a7 100644 --- a/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md +++ b/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md @@ -13,14 +13,14 @@ image from the Community AMIs is the recommended OS image as it comes with the Q sudo yum install -y python38-devel git python3.8 -m pip install cmind cm pull repo mlcommons@cm4mlops -mlcr --tags=get,python --version_min=3.8.1 +mlcr get,python --version_min=3.8.1 ``` ## Bert-99 ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic \ --backend=glow --scenario=Offline --implementation=kilt --model=bert-99 \ --test_query_count=40000 --precision=uint8 --rerun --quiet \ --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ @@ -29,7 +29,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic \ ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic \ +mlcr generate-run-cmds,inference,_submission --device=qaic \ --backend=glow --scenario=Offline --implementation=kilt --model=bert-99 --precision=uint8 \ --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \ --rerun --quiet --execution-mode=valid @@ -45,13 +45,13 @@ The expected accuracy is ~90 (Optional) If you have Imagenet 2012 validation dataset downloaded, you can register it in CM as follows. This step is optional and can avoid the download from the public URL which can be slow at times. ``` -mlcr --tags=get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val +mlcr get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val ``` ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=resnet50 \ --test_query_count=400000 --precision=uint8 --rerun --adr.compiler.tags=gcc \ --adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=test --quiet @@ -60,7 +60,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backen ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_submission --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=resnet50 \ --precision=uint8 --rerun --adr.compiler.tags=gcc \ --adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=valid --quiet @@ -76,7 +76,7 @@ Expected accuracy is 75.936% ### Quick performance run ``` -mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \ --scenario=Offline --implementation=kilt --model=retinanet --test_query_count=40000 --precision=uint8 \ --rerun --quiet --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.1,_dl2q.24xlarge,_bs.1 \ --adr.compiler.tags=gcc --execution-mode=test @@ -85,7 +85,7 @@ mlcr --tags=generate-run-cmds,inference,_performance-only --device=qaic --backen ### Full valid run ``` -mlcr --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \ +mlcr generate-run-cmds,inference,_submission --device=qaic --backend=glow \ 
--scenario=Offline --implementation=kilt --model=retinanet \ --precision=uint8 --rerun --adr.compiler.tags=gcc --adr.dataset-preprocessed.tags=_custom-annotations \ --adr.mlperf-inference-implementation.tags=_bs.1,_dl2q.24xlarge --execution-mode=valid --quiet diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index f041b8f66..7049671d7 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -1937,7 +1937,7 @@ docker: MLC_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ] pre_run_cmds: - #- mlc pull repo && mlcr --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update + #- mlc pull repo && mlcr get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update - mlc pull repo mounts: - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}" diff --git a/script/benchmark-any-mlperf-inference-implementation/run-template.sh b/script/benchmark-any-mlperf-inference-implementation/run-template.sh index 0224c34dd..8e0cb42c0 100644 --- a/script/benchmark-any-mlperf-inference-implementation/run-template.sh +++ b/script/benchmark-any-mlperf-inference-implementation/run-template.sh @@ -43,47 +43,47 @@ function run_test() { results_dir=$HOME/results_dir #Add your run commands here... -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun ${EXTRA_ARGS}' -find_ss_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_ss_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=SingleStream --quiet --test_query_count=$test_query_count $rerun ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme --scenario=$scenario \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model 
--implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet \ --skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' -tflite_accuracy_cmd='mlcr --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +tflite_accuracy_cmd='mlcr run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ ${EXTRA_ARGS}' -tflite_performance_cmd='mlcr --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +tflite_performance_cmd='mlcr run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER_STRING} \ --adr.compiler.tags=gcc \ ${extra_option} \ ${EXTRA_ARGS}' -tflite_readme_cmd='mlcr --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +tflite_readme_cmd='mlcr run,mobilenet-models,_tflite,_populate-readme$extra_tags \ ${POWER_STRING} \ --adr.compiler.tags=gcc \ ${extra_option} \ diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py index 35cf623ec..25b7a17e3 100644 --- a/script/benchmark-program/customize.py +++ b/script/benchmark-program/customize.py @@ -84,7 +84,7 @@ def preprocess(i): pre_run_cmd += ' && ' # running the script as a process in background - pre_run_cmd = pre_run_cmd + 'mlcr --tags=runtime,system,utilisation' + \ + pre_run_cmd = pre_run_cmd + 'mlcr runtime,system,utilisation' + \ env['MLC_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' # obtain the command if of the background process pre_run_cmd += r" cmd_pid=\$! && echo CMD_PID=\$cmd_pid" diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py index bedfe41f5..fd6ab8235 100644 --- a/script/extract-file/customize.py +++ b/script/extract-file/customize.py @@ -125,7 +125,7 @@ def preprocess(i): q + extract_to_folder + q env['MLC_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder - x = '"' if ' ' in filename else '' + x = q if ' ' in filename else '' env['MLC_EXTRACT_CMD'] = env['MLC_EXTRACT_PRE_CMD'] + env['MLC_EXTRACT_TOOL'] + ' ' + \ env.get('MLC_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \ ' ' + env.get('MLC_EXTRACT_TOOL_OPTIONS', '') + ' ' + x + filename + x diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py index 7dda1acbb..a10ce68a5 100644 --- a/script/generate-mlperf-inference-submission/customize.py +++ b/script/generate-mlperf-inference-submission/customize.py @@ -160,15 +160,11 @@ def generate_submission(env, state, inp, submission_division): print('* MLPerf inference submitter: {}'.format(submitter)) if env.get('MLC_MLPERF_SUT_SW_NOTES_EXTRA', '') != '': - sw_notes = f"""{ - system_meta_tmp['sw_notes']} { - env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}""" + sw_notes = f"""{system_meta_tmp.get('sw_notes','')} {env['MLC_MLPERF_SUT_SW_NOTES_EXTRA']}""" system_meta_tmp['sw_notes'] = sw_notes if env.get('MLC_MLPERF_SUT_HW_NOTES_EXTRA', '') != '': - hw_notes = f"""{ - system_meta_tmp['hw_notes']} { - env['MLC_MLPERF_SUT_HW_NOTES_EXTRA']}""" + hw_notes = f"""{system_meta_tmp.get('hw_notes', '')} {env['MLC_MLPERF_SUT_HW_NOTES_EXTRA']}""" system_meta_tmp['hw_notes'] = hw_notes path_submission = os.path.join(path_submission_division, submitter) diff --git a/script/get-lib-armnn/customize.py b/script/get-lib-armnn/customize.py index 263850444..b6aaae7a5 100644 --- a/script/get-lib-armnn/customize.py +++ b/script/get-lib-armnn/customize.py @@ -11,7 +11,7 @@ def preprocess(i): version = env['MLC_LIB_ARMNN_VERSION'] if 
env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'x86_64': url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz" - elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') == 'aarch64': + elif env.get('MLC_HOST_PLATFORM_FLAVOR', '') in ['arm64', 'aarch64']: url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz" env['MLC_LIB_ARMNN_PREBUILT_BINARY_URL'] = url diff --git a/script/get-platform-details/README-EXTRA.md b/script/get-platform-details/README-EXTRA.md index 45ac261f2..c5b6ebfc2 100644 --- a/script/get-platform-details/README-EXTRA.md +++ b/script/get-platform-details/README-EXTRA.md @@ -1,7 +1,7 @@ Please execute the following CM command to obtain the platform details of the System Under Test (SUT): ``` -mlcr --tags=get,platform-details --platform_details_dir= +mlcr get,platform-details --platform_details_dir= ``` diff --git a/script/get-rocm-devices/README.md b/script/get-rocm-devices/README.md index 294a147bb..722b01028 100644 --- a/script/get-rocm-devices/README.md +++ b/script/get-rocm-devices/README.md @@ -1,4 +1,4 @@ Run this script ``` -mlcr --tags=get,rocm-devices +mlcr get,rocm-devices ``` diff --git a/script/run-all-mlperf-models/run-bert-macos.sh b/script/run-all-mlperf-models/run-bert-macos.sh index e0275153c..edf27333a 100644 --- a/script/run-all-mlperf-models/run-bert-macos.sh +++ b/script/run-all-mlperf-models/run-bert-macos.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' power="" #Add your run commands here... -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ 
--skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-bert.sh b/script/run-all-mlperf-models/run-bert.sh index 530c55e48..23d169dd8 100644 --- a/script/run-all-mlperf-models/run-bert.sh +++ b/script/run-all-mlperf-models/run-bert.sh @@ -38,26 +38,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-cpp-implementation.sh b/script/run-all-mlperf-models/run-cpp-implementation.sh index 7159cbcd8..a4db90e68 100644 --- a/script/run-all-mlperf-models/run-cpp-implementation.sh +++ b/script/run-all-mlperf-models/run-cpp-implementation.sh @@ -29,21 +29,21 @@ division="closed" POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 " POWER="" -run "mlcr --tags=set,system,performance,mode" +run "mlcr set,system,performance,mode" #cpp -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --adr.compiler.tags=gcc \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=2000 " -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --adr.compiler.tags=gcc \ --category=edge --division=open --scenario=Offline 
--quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -53,7 +53,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -63,7 +63,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -73,7 +73,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -85,20 +85,20 @@ ${POWER} \ # GPU -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --adr.compiler.tags=gcc \ --test_query_count=20000 \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --adr.compiler.tags=gcc \ --test_query_count=2000 \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --category=edge --division=$division --quiet \ @@ -108,7 +108,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=Offline \ --category=edge --division=$division --quiet \ @@ -119,7 +119,7 @@ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=SingleStream \ @@ -130,7 +130,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ @@ -141,7 +141,7 @@ ${POWER} \ --results_dir=$HOME/results_dir" #multistream -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr 
generate-run-cmds,inference,_submission \ --scenario=Offline \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=MultiStream \ @@ -152,7 +152,7 @@ run "mlcr --tags=generate-run-cmds,inference,_submission \ ${POWER} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=generate-run-cmds,inference,_submission \ +run "mlcr generate-run-cmds,inference,_submission \ --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ --scenario=MultiStream \ --category=edge --division=$division --quiet \ diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh index 8fa760c8e..4190bf82c 100644 --- a/script/run-all-mlperf-models/run-mobilenet-models.sh +++ b/script/run-all-mlperf-models/run-mobilenet-models.sh @@ -32,35 +32,35 @@ extra_tags="" #Add your run commands here... # run "$MLC_RUN_CMD" -run "mlcr --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER} \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_populate-readme$extra_tags \ ${POWER} \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ ${extra_option} \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ ${POWER} \ ${extra_option} \ --adr.compiler.tags=gcc \ --results_dir=$HOME/results_dir" -run "mlcr --tags=run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ +run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ ${POWER} \ ${extra_option} \ --adr.compiler.tags=gcc \ diff --git a/script/run-all-mlperf-models/run-nvidia-4090.sh b/script/run-all-mlperf-models/run-nvidia-4090.sh index bc4eb5ae5..1fc37c864 100644 --- a/script/run-all-mlperf-models/run-nvidia-4090.sh +++ b/script/run-all-mlperf-models/run-nvidia-4090.sh @@ -38,7 +38,7 @@ power="" power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" #Add your run commands here... 
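# Throughout this series the mlcr invocations switch from "mlcr --tags=TAG1,TAG2" to the
# positional form "mlcr TAG1,TAG2"; the flags themselves are unchanged. The power string
# defined just above enables MLPerf power measurement for every template that expands
# $power. A sketch of one instantiated call in the new form (the model/backend values
# are illustrative, taken from elsewhere in this series):
#run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \
#--model=resnet50 --implementation=nvidia --device=cuda --backend=tensorrt \
#--category=edge --division=closed --quiet $power"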
# run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -49,7 +49,7 @@ find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance #run "3d-unet" "30" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --execution-mode=valid --implementation=$implementation --device=$device --backend=$backend --results_dir=$HOME/results_dir \ --category=$category --division=$division --skip_submission_generation=yes --quiet $power' diff --git a/script/run-all-mlperf-models/run-nvidia-a100.sh b/script/run-all-mlperf-models/run-nvidia-a100.sh index 70069b9a7..a3489e7d2 100644 --- a/script/run-all-mlperf-models/run-nvidia-a100.sh +++ b/script/run-all-mlperf-models/run-nvidia-a100.sh @@ -37,7 +37,7 @@ connection_type="sxm" #Add your run commands here... # run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -48,7 +48,7 @@ run "bert-99" "20000" "${find_performance_cmd}" run "3d-unet-99.9" "30" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --adr.nvidia-harensss.tags=_${connection_type} $power' diff --git a/script/run-all-mlperf-models/run-nvidia-t4.sh b/script/run-all-mlperf-models/run-nvidia-t4.sh index facdb0a60..adde34344 100644 --- a/script/run-all-mlperf-models/run-nvidia-t4.sh +++ b/script/run-all-mlperf-models/run-nvidia-t4.sh @@ -35,7 +35,7 @@ category="edge,datacenter" #Add your run commands here... 
# run "$MLC_RUN_CMD" -find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' @@ -47,7 +47,7 @@ run "bert-99.9" "5000" "${find_performance_cmd}" run "3d-unet" "10" "${find_performance_cmd}" -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet' diff --git a/script/run-all-mlperf-models/run-pruned-bert.sh b/script/run-all-mlperf-models/run-pruned-bert.sh index b7bc2beae..f5ed64042 100644 --- a/script/run-all-mlperf-models/run-pruned-bert.sh +++ b/script/run-all-mlperf-models/run-pruned-bert.sh @@ -45,7 +45,7 @@ scenario="Offline" if [[ $scenario == "Offline" ]]; then for stub in ${zoo_stub_list[@]}; do -cmd="mlcr --tags=run,mlperf,inference,generate-run-cmds,_find-performance \ +cmd="mlcr run,mlperf,inference,generate-run-cmds,_find-performance \ --adr.python.version_min=3.8 \ --implementation=reference \ --model=bert-99 \ @@ -64,7 +64,7 @@ done fi for stub in ${zoo_stub_list[@]}; do - cmd="mlcr --tags=run,mlperf,inference,generate-run-cmds \ + cmd="mlcr run,mlperf,inference,generate-run-cmds \ --adr.python.version_min=3.8 \ --adr.compiler.tags=gcc \ --implementation=reference \ diff --git a/script/run-all-mlperf-models/run-reference-models.sh b/script/run-all-mlperf-models/run-reference-models.sh index 84d7526fd..01766158a 100644 --- a/script/run-all-mlperf-models/run-reference-models.sh +++ b/script/run-all-mlperf-models/run-reference-models.sh @@ -25,43 +25,43 @@ function run() { division="closed" #Add your run commands here... 
# run "$MLC_RUN_CMD" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=100" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=rnnt --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=retinanet --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_find-performance \ +run "mlcr generate-run-cmds,inference,_find-performance \ --model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=open --scenario=Offline --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=rnnt --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=retinanet --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" -run "mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +run "mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ --category=edge --division=$division --quiet" diff --git a/script/run-all-mlperf-models/run-resnet50-macos.sh b/script/run-all-mlperf-models/run-resnet50-macos.sh index ea2f91346..81b8d7124 100644 --- a/script/run-all-mlperf-models/run-resnet50-macos.sh +++ b/script/run-all-mlperf-models/run-resnet50-macos.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' power="" #Add your run commands here... 
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-resnet50.sh b/script/run-all-mlperf-models/run-resnet50.sh index d9945c745..2ef2c3cff 100644 --- a/script/run-all-mlperf-models/run-resnet50.sh +++ b/script/run-all-mlperf-models/run-resnet50.sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... 
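# These templates are consumed by run_test further down the script; judging from the
# calls visible later in this series, the positional arguments supply the backend,
# test query count, implementation and device, e.g.:
#run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd"
#run_test "tf" "100" "reference" "cpu" "$submission_cmd"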
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/run-retinanet-sh b/script/run-all-mlperf-models/run-retinanet-sh index c5ede6296..1009fea53 100644 --- a/script/run-all-mlperf-models/run-retinanet-sh +++ b/script/run-all-mlperf-models/run-retinanet-sh @@ -37,26 +37,26 @@ function run_test() { power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' #Add your run commands here... 
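# submission_cmd_scenario below differs from submission_cmd only in pinning a single
# scenario via $scenario instead of _all-scenarios. A sketch of a SingleStream-only
# valid run (the argument values are illustrative):
#scenario=SingleStream
#run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd_scenario"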
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-all-mlperf-models/template.sh b/script/run-all-mlperf-models/template.sh index ff43cf2fe..4fbd47c73 100644 --- a/script/run-all-mlperf-models/template.sh +++ b/script/run-all-mlperf-models/template.sh @@ -40,26 +40,26 @@ function run_test() { power=${POWER_STRING} #Add your run commands here... 
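# template.sh appears to be the skeleton from which the per-model scripts above are
# derived; power=${POWER_STRING} marks the substitution point, so a generated script
# with measurement enabled would effectively start from:
#power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 "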
-find_performance_cmd='mlcr --tags=generate-run-cmds,inference,_find-performance \ +find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' -submission_cmd='mlcr --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -submission_cmd_scenario='mlcr --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd_single='mlcr --tags=generate-run-cmds,inference,_populate-readme \ +readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' -readme_cmd='mlcr --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution-mode=valid $power' diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index a8b92139b..a504ffd07 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -23,7 +23,7 @@ def preprocess(i): env['MLC_DOCKER_RUN_SCRIPT_TAGS'] = "run,docker,container" MLC_RUN_CMD = "mlc version" else: - MLC_RUN_CMD = "mlcr --tags=" + \ + MLC_RUN_CMD = "mlcr " + \ env['MLC_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' r = mlc.access({'action': 'search', diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml index f78f8b6e7..fd4cf3468 100644 --- a/script/run-mlperf-inference-mobilenet-models/meta.yaml +++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml @@ -16,7 +16,7 @@ docker: results_dir: RESULTS_DIR submission_dir: SUBMISSION_DIR docker_run_final_cmds: - - mlcr --tags=run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True + - mlcr run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True --adr.compiler.tags=gcc fake_run_deps: false mounts: diff --git a/script/run-terraform/README-about.md b/script/run-terraform/README-about.md index 674ebee42..d0a7ba01f 100644 --- a/script/run-terraform/README-about.md +++ b/script/run-terraform/README-about.md @@ -7,6 +7,6 @@ gcloud auth application-default login The above two commands will install google-cloud-cli and authorizes the user to access it. 
Once done, you can start creating GCP instances using CM commands like the one below. To destroy an instance, repeat the same command with the `--destroy` option. ``` -mlcr --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit +mlcr run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit ``` Here, `mlperf-inference-tests` is the name of the Google Cloud project as created in the [Google Cloud console](https://console.cloud.google.com/apis/dashboard) From 9900cb4530ce0f5d299f76ba7b523bf61e3bb7ff Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sun, 16 Feb 2025 23:15:01 +0000 Subject: [PATCH 09/40] Fix run-mobilenet (#233) * Fix mobilenet run-all --- .../run-mobilenet-models.sh | 24 +++++-------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh index 4190bf82c..a268c49b1 100644 --- a/script/run-all-mlperf-models/run-mobilenet-models.sh +++ b/script/run-all-mlperf-models/run-mobilenet-models.sh @@ -34,34 +34,22 @@ extra_tags="" # run "$MLC_RUN_CMD" run "mlcr run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir +${extra_option} " run "mlcr run,mobilenet-models,_tflite,_performance-only$extra_tags \ ${POWER} \ --adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir +${extra_option}" + -run "mlcr run,mobilenet-models,_tflite,_populate-readme$extra_tags \ -${POWER} \ ---adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir" run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ --adr.compiler.tags=gcc \ -${extra_option} \ ---results_dir=$HOME/results_dir +${extra_option} " run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ ${POWER} \ ${extra_option} \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/results_dir" +--adr.compiler.tags=gcc" + -run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ -${POWER} \ -${extra_option} \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/results_dir" From 9bbf7cee203e6b27a301aae5743b414a39cdbbc6 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sun, 16 Feb 2025 23:47:09 +0000 Subject: [PATCH 10/40] Fixes to run-all scripts (#234) * Fix execution_mode in run-all script --- .../run-all-mlperf-models/run-bert-macos.sh | 8 +++---- script/run-all-mlperf-models/run-bert.sh | 17 ++++---------- .../run-cpp-implementation.sh | 20 ++++++++--------- .../run-all-mlperf-models/run-nvidia-4090.sh | 2 +- .../run-resnet50-macos.sh | 22 ++++++------------- script/run-all-mlperf-models/run-resnet50.sh | 8 +++---- script/run-all-mlperf-models/run-retinanet-sh | 17 ++++---------- script/run-all-mlperf-models/template.sh | 8 +++---- 8 files changed, 38 insertions(+), 64 deletions(-) diff --git a/script/run-all-mlperf-models/run-bert-macos.sh b/script/run-all-mlperf-models/run-bert-macos.sh index edf27333a..bc7a75747 100644 --- a/script/run-all-mlperf-models/run-bert-macos.sh +++ b/script/run-all-mlperf-models/run-bert-macos.sh @@ -44,22 +44,22 @@ find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' 
+--skip_submission_generation=yes --execution_mode=valid $power' submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "100" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/run-bert.sh b/script/run-all-mlperf-models/run-bert.sh index 23d169dd8..815e144ec 100644 --- a/script/run-all-mlperf-models/run-bert.sh +++ b/script/run-all-mlperf-models/run-bert.sh @@ -44,23 +44,14 @@ find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' - -readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "20" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/run-cpp-implementation.sh b/script/run-all-mlperf-models/run-cpp-implementation.sh index a4db90e68..345a57a3d 100644 --- a/script/run-all-mlperf-models/run-cpp-implementation.sh +++ b/script/run-all-mlperf-models/run-cpp-implementation.sh @@ -48,7 +48,7 @@ run "mlcr 
generate-run-cmds,inference,_submission \ --scenario=Offline \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -58,7 +58,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -68,7 +68,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -78,7 +78,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -103,7 +103,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -113,7 +113,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --scenario=Offline \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -125,7 +125,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -135,7 +135,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --scenario=SingleStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -147,7 +147,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --scenario=MultiStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" @@ -157,7 +157,7 @@ run "mlcr generate-run-cmds,inference,_submission \ --scenario=MultiStream \ --category=edge --division=$division --quiet \ --adr.compiler.tags=gcc \ ---execution-mode=valid \ +--execution_mode=valid \ --skip_submission_generation=yes \ ${POWER} \ --results_dir=$HOME/results_dir" diff --git a/script/run-all-mlperf-models/run-nvidia-4090.sh b/script/run-all-mlperf-models/run-nvidia-4090.sh index 1fc37c864..135a270d0 100644 --- a/script/run-all-mlperf-models/run-nvidia-4090.sh +++ b/script/run-all-mlperf-models/run-nvidia-4090.sh @@ -50,7 +50,7 @@ find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ ---model=$model --execution-mode=valid --implementation=$implementation --device=$device --backend=$backend 
--results_dir=$HOME/results_dir \ +--model=$model --execution_mode=valid --implementation=$implementation --device=$device --backend=$backend --results_dir=$HOME/results_dir \ --category=$category --division=$division --skip_submission_generation=yes --quiet $power' #run_model "bert-99.9" "10" "${submission_cmd} --offline_target_qps=1680 --server_target_qps=1520" diff --git a/script/run-all-mlperf-models/run-resnet50-macos.sh b/script/run-all-mlperf-models/run-resnet50-macos.sh index 81b8d7124..5b83b9a9b 100644 --- a/script/run-all-mlperf-models/run-resnet50-macos.sh +++ b/script/run-all-mlperf-models/run-resnet50-macos.sh @@ -43,28 +43,20 @@ find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' - -readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' # run "$MLC_RUN_CMD" -run_test "onnxruntime" "6000" "reference" "cpu" "$find_performance_cmd --rerun" -run_test "tf" "6000" "reference" "cpu" "$find_performance_cmd --rerun" +#run_test "onnxruntime" "6000" "reference" "cpu" "$find_performance_cmd --rerun" +#run_test "tf" "6000" "reference" "cpu" "$find_performance_cmd --rerun" run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" + run_test "tf" "100" "reference" "cpu" "$submission_cmd" diff --git a/script/run-all-mlperf-models/run-resnet50.sh b/script/run-all-mlperf-models/run-resnet50.sh index 2ef2c3cff..bd810a277 100644 --- a/script/run-all-mlperf-models/run-resnet50.sh +++ b/script/run-all-mlperf-models/run-resnet50.sh @@ -44,22 +44,22 @@ find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation 
--device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "200" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/run-retinanet-sh b/script/run-all-mlperf-models/run-retinanet-sh index 1009fea53..3f10a88ee 100644 --- a/script/run-all-mlperf-models/run-retinanet-sh +++ b/script/run-all-mlperf-models/run-retinanet-sh @@ -43,23 +43,14 @@ find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' - -readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "50" "reference" "cpu" "$find_performance_cmd" diff --git a/script/run-all-mlperf-models/template.sh b/script/run-all-mlperf-models/template.sh index 4fbd47c73..9a5fb1893 100644 --- a/script/run-all-mlperf-models/template.sh +++ b/script/run-all-mlperf-models/template.sh @@ -47,20 +47,20 @@ find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ submission_cmd='mlcr generate-run-cmds,inference,_submission,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category 
--division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenario=$scenario \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ --model=$model --implementation=$implementation --device=$device --backend=$backend \ --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution-mode=valid $power' +--skip_submission_generation=yes --execution_mode=valid $power' From fd8269bd93a5f821126e59916ece1d255a70ab66 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Mon, 17 Feb 2025 14:26:05 +0530 Subject: [PATCH 11/40] Fix for issue #236 (#237) * create rclone config after getting rclone * create config after getting rclone --- script/get-ml-model-llama2/meta.yaml | 6 +++--- script/get-ml-model-llama3/meta.yaml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/script/get-ml-model-llama2/meta.yaml b/script/get-ml-model-llama2/meta.yaml index fd9b43e68..d0e14cf77 100644 --- a/script/get-ml-model-llama2/meta.yaml +++ b/script/get-ml-model-llama2/meta.yaml @@ -90,12 +90,12 @@ variations: env: MLC_DOWNLOAD_SRC: mlcommons prehook_deps: - - tags: get,rclone-config,_mlperf-llama2 - force_cache: yes + - tags: get,rclone enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes - - tags: get,rclone + - tags: get,rclone-config,_mlperf-llama2 + force_cache: yes enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes diff --git a/script/get-ml-model-llama3/meta.yaml b/script/get-ml-model-llama3/meta.yaml index 2edbeaa3b..2ae6ba84e 100644 --- a/script/get-ml-model-llama3/meta.yaml +++ b/script/get-ml-model-llama3/meta.yaml @@ -52,12 +52,12 @@ variations: group: download-src default: true prehook_deps: - - tags: get,rclone-config,_mlperf-llama3-1 - force_cache: true + - tags: get,rclone enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes - - tags: get,rclone + - tags: get,rclone-config,_mlperf-llama3-1 + force_cache: true enable_if_env: MLC_TMP_REQUIRE_DOWNLOAD: - yes From d7e33d7266ab3e015d02fde490fc817489df8d17 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Mon, 17 Feb 2025 14:28:52 +0530 Subject: [PATCH 12/40] Refactored pointpainting model download script (#238) * refactored pointpainting model download * [Automated Commit] Format Codebase [skip ci] --------- Co-authored-by: github-actions[bot] --- .../customize.py | 2 +- .../meta.yaml | 13 +--- script/app-mlperf-inference/meta.yaml | 12 +--- .../COPYRIGHT.md | 0 .../get-ml-model-pointpainting/customize.py | 59 +++++++++++++++++++ script/get-ml-model-pointpainting/meta.yaml | 45 ++++++++++++++ 
.../get-ml-model-pointpainting/run-rclone.sh | 4 ++ .../run.sh | 0 script/get-ml-model-pointpillars/customize.py | 32 ---------- script/get-ml-model-pointpillars/meta.yaml | 26 -------- .../COPYRIGHT.md | 9 --- .../customize.py | 33 ----------- .../get-ml-model-resnet50-deeplab/meta.yaml | 27 --------- script/get-ml-model-resnet50-deeplab/run.sh | 8 --- script/run-mlperf-inference-app/meta.yaml | 2 +- 15 files changed, 115 insertions(+), 157 deletions(-) rename script/{get-ml-model-pointpillars => get-ml-model-pointpainting}/COPYRIGHT.md (100%) create mode 100644 script/get-ml-model-pointpainting/customize.py create mode 100644 script/get-ml-model-pointpainting/meta.yaml create mode 100644 script/get-ml-model-pointpainting/run-rclone.sh rename script/{get-ml-model-pointpillars => get-ml-model-pointpainting}/run.sh (100%) delete mode 100644 script/get-ml-model-pointpillars/customize.py delete mode 100644 script/get-ml-model-pointpillars/meta.yaml delete mode 100644 script/get-ml-model-resnet50-deeplab/COPYRIGHT.md delete mode 100644 script/get-ml-model-resnet50-deeplab/customize.py delete mode 100644 script/get-ml-model-resnet50-deeplab/meta.yaml delete mode 100644 script/get-ml-model-resnet50-deeplab/run.sh diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index 8144df6e7..80eb8ba57 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -537,7 +537,7 @@ def get_run_cmd_reference( cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \ " --dataset waymo" + \ " --dataset-path " + env['MLC_DATASET_WAYMO_PATH'] + \ - " --lidar-path " + env['MLC_ML_MODEL_POINT_PILLARS_PATH'] + \ + " --lidar-path " + env['MLC_ML_MODEL_POINT_PAINTING_PATH'] + \ " --segmentor-path " + env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] + \ " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \ " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + \ diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml index 2b6795b00..011e46b99 100644 --- a/script/app-mlperf-inference-mlcommons-python/meta.yaml +++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml @@ -66,7 +66,7 @@ input_mapping: multistream_target_latency: MLC_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY network: MLC_NETWORK_LOADGEN sut_servers: MLC_NETWORK_LOADGEN_SUT_SERVERS - pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH + pointpainting_checkpoint_path: MLC_ML_MODEL_POINT_PAINTING_PATH deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH waymo_path: MLC_DATASET_WAYMO_PATH @@ -505,7 +505,7 @@ deps: - "yes" ## pointpainting - - tags: get,ml-model,pointpillars + - tags: get,ml-model,pointpainting names: - pointpillars-model enable_if_env: @@ -514,15 +514,6 @@ deps: skip_if_env: MLC_RUN_STATE_DOCKER: - "yes" - - tags: get,ml-model,resnet50-deeplab - enable_if_env: - MLC_MODEL: - - pointpainting - skip_if_env: - MLC_RUN_STATE_DOCKER: - - "yes" - names: - - resnet50-deeplab-model ######################################################################## # Install datasets diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index 7049671d7..867b5e6c8 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -846,18 +846,12 @@ variations: - 'yes' names: - waymo-dataset - - tags: get,ml-model,pointpillars + - tags: get,ml-model,pointpainting enable_if_env: 
          MLC_USE_DATASET_FROM_HOST:
          - 'yes'
        names:
-        - pointpillars-model
-      - tags: get,ml-model,resnet50-deeplab
-        enable_if_env:
-          MLC_USE_DATASET_FROM_HOST:
-          - 'yes'
-        names:
-        - resnet50-deeplab-model
+        - pointpainting-model
   posthook_deps:
   - enable_if_env:
       MLC_MLPERF_LOADGEN_MODE:
@@ -1956,7 +1950,7 @@ docker:
     - "${{ MLC_DATASET_IGBH_PATH }}:${{ MLC_DATASET_IGBH_PATH }}"
     - "${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}:${{ MLC_ML_MODEL_RGAT_CHECKPOINT_PATH }}"
     - "${{ MLC_DATASET_WAYMO_PATH }}:${{ MLC_DATASET_WAYMO_PATH }}"
-    - "${{ MLC_ML_MODEL_POINT_PILLARS_PATH }}:${{ MLC_ML_MODEL_POINT_PILLARS_PATH }}"
+    - "${{ MLC_ML_MODEL_POINT_PAINTING_PATH }}:${{ MLC_ML_MODEL_POINT_PAINTING_PATH }}"
     - "${{ MLC_ML_MODEL_DPLAB_RESNET50_PATH }}:${{ MLC_ML_MODEL_DPLAB_RESNET50_PATH }}"
   skip_run_cmd: 'no'
   shm_size: '32gb'
diff --git a/script/get-ml-model-pointpillars/COPYRIGHT.md b/script/get-ml-model-pointpainting/COPYRIGHT.md
similarity index 100%
rename from script/get-ml-model-pointpillars/COPYRIGHT.md
rename to script/get-ml-model-pointpainting/COPYRIGHT.md
diff --git a/script/get-ml-model-pointpainting/customize.py b/script/get-ml-model-pointpainting/customize.py
new file mode 100644
index 000000000..07b18602d
--- /dev/null
+++ b/script/get-ml-model-pointpainting/customize.py
@@ -0,0 +1,59 @@
+from mlc import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    if os_info['platform'] == "windows":
+        return {'return': 1, 'error': 'Script not supported in windows yet!'}
+
+    if env.get('MLC_ML_MODEL_POINT_PAINTING_PATH', '') != '':
+        if not os.path.exists(env['MLC_ML_MODEL_POINT_PAINTING_PATH']):
+            return {
+                'return': 1, 'error': f"Provided model path {env['MLC_ML_MODEL_POINT_PAINTING_PATH']} does not exist."}
+
+    if env.get('MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') != '':
+        if not os.path.exists(env['MLC_ML_MODEL_DPLAB_RESNET50_PATH']):
+            return {
+                'return': 1, 'error': f"Provided model path {env['MLC_ML_MODEL_DPLAB_RESNET50_PATH']} does not exist."}
+
+    if env.get('MLC_ML_MODEL_POINT_PAINTING_PATH', '') == '' or env.get(
+            'MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') == '':
+        env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
+        if env['MLC_DOWNLOAD_SRC'] == "mlcommons":
+            i['run_script_input']['script_name'] = 'run-rclone'
+            if env.get('MLC_OUTDIRNAME', '') != '':
+                env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'] = env['MLC_OUTDIRNAME']
+            else:
+                env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'] = os.path.join(
+                    os.getcwd(), "model")
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    if env.get('MLC_ML_MODEL_POINT_PAINTING_PATH', '') == '':
+        if env['MLC_ML_MODEL_PP_FORMAT'] == "onnx":
+            env['MLC_ML_MODEL_POINT_PAINTING_PATH'] = os.path.join(
+                env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'], "pp.onnx")
+        else:
+            env['MLC_ML_MODEL_POINT_PAINTING_PATH'] = os.path.join(
+                env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'], "pp_ep36.pth")
+
+    if env.get('MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') == '':
+        if env['MLC_ML_MODEL_DPLAB_RESNET50_FORMAT'] == "onnx":
+            env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join(
+                env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'], "deeplabv3+.onnx")
+        else:
+            env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join(
+                env['MLC_ML_MODEL_POINT_PAINTING_TMP_PATH'],
+                "best_deeplabv3plus_resnet50_waymo_os16.pth")
+
+    return {'return': 0}
diff --git a/script/get-ml-model-pointpainting/meta.yaml b/script/get-ml-model-pointpainting/meta.yaml
new file mode 100644
index 000000000..b811d58d6
--- /dev/null
+++ b/script/get-ml-model-pointpainting/meta.yaml
@@ -0,0 +1,45 @@
+alias: get-ml-model-pointpainting
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+tags:
+- get
+- ml-model
+- ml
+- model
+- pointpainting
+uid: 3562621a8994411d
+new_env_keys:
+  - MLC_ML_MODEL_POINT_PAINTING_PATH
+  - MLC_ML_MODEL_DPLAB_RESNET50_PATH
+input_mapping:
+  pp_path: MLC_ML_MODEL_POINT_PAINTING_PATH
+  dp_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH
+variations:
+  gpu:
+    default: true
+    group: device
+    env:
+      MLC_ML_MODEL_PP_FORMAT: pth
+      MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: pth
+  cpu:
+    group: device
+    env:
+      MLC_ML_MODEL_PP_FORMAT: onnx
+      MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: onnx
+  mlc:
+    group: download-src
+    default: true
+    prehook_deps:
+      - tags: get,rclone
+        enable_if_env:
+          MLC_TMP_REQUIRE_DOWNLOAD:
+            - yes
+      - tags: get,rclone-config,_waymo
+        force_cache: true
+        enable_if_env:
+          MLC_TMP_REQUIRE_DOWNLOAD:
+            - yes
+    env:
+      MLC_DOWNLOAD_SRC: mlcommons
+
diff --git a/script/get-ml-model-pointpainting/run-rclone.sh b/script/get-ml-model-pointpainting/run-rclone.sh
new file mode 100644
index 000000000..9b76a1511
--- /dev/null
+++ b/script/get-ml-model-pointpainting/run-rclone.sh
@@ -0,0 +1,4 @@
+cmd="rclone sync mlc-waymo:waymo_preprocessed_dataset/model ${MLC_ML_MODEL_POINT_PAINTING_TMP_PATH} -P"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
\ No newline at end of file
diff --git a/script/get-ml-model-pointpillars/run.sh b/script/get-ml-model-pointpainting/run.sh
similarity index 100%
rename from script/get-ml-model-pointpillars/run.sh
rename to script/get-ml-model-pointpainting/run.sh
diff --git a/script/get-ml-model-pointpillars/customize.py b/script/get-ml-model-pointpillars/customize.py
deleted file mode 100644
index b6685c889..000000000
--- a/script/get-ml-model-pointpillars/customize.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from mlc import utils
-import os
-
-
-def preprocess(i):
-
-    os_info = i['os_info']
-
-    env = i['env']
-
-    if os_info['platform'] == "windows":
-        return {'return': 1, 'error': 'Script not supported in windows yet!'}
-
-    if env.get('MLC_ML_MODEL_POINT_PILLARS_PATH', '') == '':
-        return {'return': 1, 'error': 'Please provide path to pointpillars model using tag \\`--pp_path\\`as automatic download of this model is not supported yet.'}
-
-    if os.path.isdir(env['MLC_ML_MODEL_POINT_PILLARS_PATH']):
-        if env['MLC_ML_MODEL_PP_FORMAT'] == "onnx":
-            env['MLC_ML_MODEL_POINT_PILLARS_PATH'] = os.path.join(
-                env['MLC_ML_MODEL_POINT_PILLARS_PATH'], "pp.onnx")
-        else:
-            env['MLC_ML_MODEL_POINT_PILLARS_PATH'] = os.path.join(
-                env['MLC_ML_MODEL_POINT_PILLARS_PATH'], "pp_ep36.pth")
-
-    return {'return': 0}
-
-
-def postprocess(i):
-
-    env = i['env']
-
-    return {'return': 0}
diff --git a/script/get-ml-model-pointpillars/meta.yaml b/script/get-ml-model-pointpillars/meta.yaml
deleted file mode 100644
index 18470e4c0..000000000
--- a/script/get-ml-model-pointpillars/meta.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-alias: get-ml-model-pointpillars
-automation_alias: script
-automation_uid: 5b4e0237da074764
-cache: true
-tags:
-- get
-- ml-model
-- ml
-- model
-- pointpillars
-uid: 3562621a8994411d
-new_env_keys:
-  - MLC_ML_MODEL_POINT_PILLARS_PATH
-input_mapping:
-  pp_path: MLC_ML_MODEL_POINT_PILLARS_PATH
-variations:
-  gpu:
-    default: true
-    group: device
-    env:
-      MLC_ML_MODEL_PP_FORMAT: pth
-  cpu:
-    group: device
-    env:
-      MLC_ML_MODEL_PP_FORMAT: onnx
-
diff --git a/script/get-ml-model-resnet50-deeplab/COPYRIGHT.md b/script/get-ml-model-resnet50-deeplab/COPYRIGHT.md
deleted file mode 100644
index d2ceead84..000000000
--- 
a/script/get-ml-model-resnet50-deeplab/COPYRIGHT.md +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Notice - -© 2025-2026 MLCommons. All Rights Reserved. - -This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: - -[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - -Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. diff --git a/script/get-ml-model-resnet50-deeplab/customize.py b/script/get-ml-model-resnet50-deeplab/customize.py deleted file mode 100644 index 0df3b1c3f..000000000 --- a/script/get-ml-model-resnet50-deeplab/customize.py +++ /dev/null @@ -1,33 +0,0 @@ -from mlc import utils -import os - - -def preprocess(i): - - os_info = i['os_info'] - - env = i['env'] - - if os_info['platform'] == "windows": - return {'return': 1, 'error': 'Script not supported in windows yet!'} - - if env.get('MLC_ML_MODEL_DPLAB_RESNET50_PATH', '') == '': - return {'return': 1, 'error': 'Please provide path to deeplab resnet 50 model using tag \\`--dp_resnet50_path\\`as automatic download of this dataset is not supported yet.'} - - if os.path.isdir(env['MLC_ML_MODEL_DPLAB_RESNET50_PATH']): - if env['MLC_ML_MODEL_DPLAB_RESNET50_FORMAT'] == "onnx": - env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join( - env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'], "deeplabv3+.onnx") - else: - env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'] = os.path.join( - env['MLC_ML_MODEL_DPLAB_RESNET50_PATH'], - "best_deeplabv3plus_resnet50_waymo_os16.pth") - - return {'return': 0} - - -def postprocess(i): - - env = i['env'] - - return {'return': 0} diff --git a/script/get-ml-model-resnet50-deeplab/meta.yaml b/script/get-ml-model-resnet50-deeplab/meta.yaml deleted file mode 100644 index c8c8b84e1..000000000 --- a/script/get-ml-model-resnet50-deeplab/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -alias: get-dataset-deeplab-resnet50 -automation_alias: script -automation_uid: 5b4e0237da074764 -cache: true -tags: -- get -- ml-model -- ml -- model -- resnet50-deeplab -- resnet50 -- deeplab -uid: 93097b691a6a4fce -new_env_keys: - - MLC_ML_MODEL_DPLAB_RESNET50_PATH -input_mapping: - dp_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH -variations: - gpu: - default: true - group: device - env: - MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: pth - cpu: - group: device - env: - MLC_ML_MODEL_DPLAB_RESNET50_FORMAT: onnx diff --git a/script/get-ml-model-resnet50-deeplab/run.sh b/script/get-ml-model-resnet50-deeplab/run.sh deleted file mode 100644 index 3197bb8ad..000000000 --- a/script/get-ml-model-resnet50-deeplab/run.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -#CM Script location: ${MLC_TMP_CURRENT_SCRIPT_PATH} - -#To export any variable -#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out - -#${MLC_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml index cc8404fbd..1dc6ef242 100644 --- a/script/run-mlperf-inference-app/meta.yaml +++ b/script/run-mlperf-inference-app/meta.yaml @@ -119,7 +119,7 @@ input_mapping: use_dataset_from_host: MLC_USE_DATASET_FROM_HOST use_model_from_host: MLC_USE_MODEL_FROM_HOST 
   rgat_checkpoint_path: RGAT_CHECKPOINT_PATH
-  pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH
+  pointpainting_checkpoint_path: MLC_ML_MODEL_POINT_PAINTING_PATH
   deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH
   waymo_path: MLC_DATASET_WAYMO_PATH
   nm_model_zoo_stub: MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB

From 1d421fcbcf2e7f723083648363551a3e96ea4aa0 Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Mon, 17 Feb 2025 14:31:31 +0530
Subject: [PATCH 13/40] Add support for downloading waymo from mlcommons checkpoint (#235)

* add support for downloading mlcommons checkpoint
---
 script/get-dataset-waymo/customize.py  | 19 +++++++++++++------
 script/get-dataset-waymo/meta.yaml     | 15 +++++++++++++++
 script/get-dataset-waymo/run-rclone.sh |  4 ++++
 script/get-rclone-config/meta.yaml     |  4 ++++
 4 files changed, 36 insertions(+), 6 deletions(-)
 create mode 100644 script/get-dataset-waymo/run-rclone.sh

diff --git a/script/get-dataset-waymo/customize.py b/script/get-dataset-waymo/customize.py
index 273feef06..cb625f443 100644
--- a/script/get-dataset-waymo/customize.py
+++ b/script/get-dataset-waymo/customize.py
@@ -11,12 +11,19 @@ def preprocess(i):
     if os_info['platform'] == "windows":
         return {'return': 1, 'error': 'Script not supported in windows yet!'}
 
-    if env.get('MLC_DATASET_WAYMO_PATH', '') == '':
-        return {'return': 1, 'error': 'Please provide path to kitti dataset using tag \\`--waymo_path\\`as automatic download of this dataset is not supported yet.'}
-
-    if not os.path.exists(env['MLC_DATASET_WAYMO_PATH']):
-        return {
-            'return': 1, 'error': f"Path {env['MLC_DATASET_WAYMO_PATH']} does not exists!"}
+    if env.get('MLC_DATASET_WAYMO_PATH', '') != '':
+        if not os.path.exists(env['MLC_DATASET_WAYMO_PATH']):
+            return {
+                'return': 1, 'error': f"Path {env['MLC_DATASET_WAYMO_PATH']} does not exist!"}
+    else:
+        env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
+        if env['MLC_DOWNLOAD_SRC'] == "mlcommons":
+            i['run_script_input']['script_name'] = 'run-rclone'
+            if env.get('MLC_OUTDIRNAME', '') != '':
+                env['MLC_DATASET_WAYMO_PATH'] = env['MLC_OUTDIRNAME']
+            else:
+                env['MLC_DATASET_WAYMO_PATH'] = os.path.join(
+                    os.getcwd(), "kitti_format")
 
     return {'return': 0}
 
diff --git a/script/get-dataset-waymo/meta.yaml b/script/get-dataset-waymo/meta.yaml
index bfbba995f..63bbf2472 100644
--- a/script/get-dataset-waymo/meta.yaml
+++ b/script/get-dataset-waymo/meta.yaml
@@ -17,3 +17,18 @@ variations:
     group: dataset-format
     env:
       MLC_DATASET_WAYMO_FORMAT: kitti
+  mlc:
+    group: download-src
+    default: true
+    prehook_deps:
+      - tags: get,rclone
+        enable_if_env:
+          MLC_TMP_REQUIRE_DOWNLOAD:
+            - yes
+      - tags: get,rclone-config,_waymo
+        force_cache: true
+        enable_if_env:
+          MLC_TMP_REQUIRE_DOWNLOAD:
+            - yes
+    env:
+      MLC_DOWNLOAD_SRC: mlcommons
diff --git a/script/get-dataset-waymo/run-rclone.sh b/script/get-dataset-waymo/run-rclone.sh
new file mode 100644
index 000000000..caca3ff98
--- /dev/null
+++ b/script/get-dataset-waymo/run-rclone.sh
@@ -0,0 +1,4 @@
+cmd="rclone sync mlc-waymo:waymo_preprocessed_dataset/kitti_format ${MLC_DATASET_WAYMO_PATH} -P"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
\ No newline at end of file diff --git a/script/get-rclone-config/meta.yaml b/script/get-rclone-config/meta.yaml index 8cc949d25..f2f3332f0 100644 --- a/script/get-rclone-config/meta.yaml +++ b/script/get-rclone-config/meta.yaml @@ -19,3 +19,7 @@ variations: env: MLC_RCLONE_CONFIG_CMD: 'rclone config create mlc-llama3-1 drive config_is_local=false scope=drive.readonly root_folder_id=12K-2yvmr1ZSZ7SLrhidCbWc0BriN98am' MLC_RCLONE_CONNECT_CMD: 'rclone config reconnect mlc-llama3-1:' + waymo: + env: + MLC_RCLONE_CONFIG_CMD: 'rclone config create mlc-waymo drive config_is_local=false scope=drive.readonly root_folder_id=1xbfnaUurFeXliFFl1i1gj48eRU2NDiH5' + MLC_RCLONE_CONNECT_CMD: 'rclone config reconnect mlc-waymo:' From dd6f2a8a79de14ae7d9c435ba7a70e696de81e58 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Mon, 17 Feb 2025 23:24:29 +0000 Subject: [PATCH 14/40] Added get-aocc script (#240) * Added get-aocc --- script/get-aocc/customize.py | 112 +++++++++++++++++++++++++++++++++++ script/get-aocc/meta.yaml | 44 ++++++++++++++ script/get-aocc/run.bat | 3 + script/get-aocc/run.sh | 8 +++ 4 files changed, 167 insertions(+) create mode 100644 script/get-aocc/customize.py create mode 100644 script/get-aocc/meta.yaml create mode 100644 script/get-aocc/run.bat create mode 100644 script/get-aocc/run.sh diff --git a/script/get-aocc/customize.py b/script/get-aocc/customize.py new file mode 100644 index 000000000..383cac39f --- /dev/null +++ b/script/get-aocc/customize.py @@ -0,0 +1,112 @@ +from mlc import utils +import os + + +def predeps(i): + os_info = i['os_info'] + + env = i['env'] + if env.get('MLC_AOCC_TAR_FILE_PATH', '') != '': + env['MLC_AOCC_NEEDS_TAR'] = 'yes' + + return {'return': 0} + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + exe_c = 'clang.exe' if os_info['platform'] == 'windows' else 'clang' + + if env.get('MLC_AOCC_DIR_PATH', '') != '' and env.get( + 'MLC_AOCC_BIN_WITH_PATH', '') == '': + for f in os.listdir(env['MLC_AOCC_DIR_PATH']): + if os.path.exists(os.path.join( + env['MLC_AOCC_DIR_PATH'], f, "bin", exe_c)): + env['MLC_AOCC_BIN_WITH_PATH'] = os.path.join( + env['MLC_AOCC_DIR_PATH'], f, "bin", exe_c) + + if env.get('MLC_HOST_OS_FLAVOR', '') == 'rhel': + if "12" in env.get('MLC_VERSION', '') or "12" in env.get( + 'MLC_VERSION_MIN', ''): + if env.get('MLC_TMP_PATH', '') == '': + env['MLC_TMP_PATH'] = '' + env['MLC_TMP_PATH'] += "/opt/rh/aocc/root/usr/bin" + env['MLC_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + if 'MLC_AOCC_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': exe_c, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'MLC_AOCC_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': i['recursion_spaces']}) + if r['return'] > 0: + + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'AMD\s+clang\sversion\s([\d.]+)', + 'group_number': 1, + 'env_key': 'MLC_AOCC_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + env['MLC_COMPILER_FAMILY'] = 'AOCC' + version = r['version'] + env['MLC_COMPILER_VERSION'] = env['MLC_AOCC_VERSION'] + env['MLC_AOCC_CACHE_TAGS'] = 'version-' + version + env['MLC_COMPILER_CACHE_TAGS'] = 
'version-' + version + ',family-aocc'
+
+    found_file_path = env['MLC_AOCC_BIN_WITH_PATH']
+
+    found_path = os.path.dirname(found_file_path)
+
+    env['MLC_AOCC_INSTALLED_PATH'] = found_path
+
+    file_name_c = os.path.basename(found_file_path)
+    file_name_cpp = file_name_c.replace('clang', 'clang++')
+    env['FILE_NAME_CPP'] = file_name_cpp
+
+    env['MLC_AOCC_BIN'] = file_name_c
+
+    # General compiler for general program compilation
+    env['MLC_C_COMPILER_BIN'] = file_name_c
+    env['MLC_C_COMPILER_FLAG_OUTPUT'] = '-o '
+    env['MLC_C_COMPILER_WITH_PATH'] = found_file_path
+    env['MLC_C_COMPILER_FLAG_VERSION'] = '--version'
+
+    env['MLC_CXX_COMPILER_BIN'] = file_name_cpp
+    env['MLC_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp)
+    env['MLC_CXX_COMPILER_FLAG_OUTPUT'] = '-o '
+    env['MLC_CXX_COMPILER_FLAG_VERSION'] = '--version'
+
+    env['MLC_COMPILER_FLAGS_FAST'] = "-O3"
+    env['MLC_LINKER_FLAGS_FAST'] = "-O3"
+    env['MLC_COMPILER_FLAGS_DEBUG'] = "-O0"
+    env['MLC_LINKER_FLAGS_DEBUG'] = "-O0"
+    env['MLC_COMPILER_FLAGS_DEFAULT'] = "-O2"
+    env['MLC_LINKER_FLAGS_DEFAULT'] = "-O2"
+
+    return {'return': 0, 'version': version}
diff --git a/script/get-aocc/meta.yaml b/script/get-aocc/meta.yaml
new file mode 100644
index 000000000..80a21d311
--- /dev/null
+++ b/script/get-aocc/meta.yaml
@@ -0,0 +1,44 @@
+alias: get-aocc
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: Compiler automation
+clean_files: []
+deps:
+- tags: detect,os
+- tags: extract,file
+  update_tags_from_env_with_prefix:
+    _path.:
+    - MLC_AOCC_TAR_FILE_PATH
+  force_cache: true
+  env:
+    MLC_EXTRACT_FINAL_ENV_NAME: MLC_AOCC_DIR_PATH
+    MLC_EXTRACT_TO_FOLDER: aocc_install
+  enable_if_env:
+    MLC_AOCC_NEEDS_TAR:
+    - yes
+
+input_mapping:
+  tar_file_path: MLC_AOCC_TAR_FILE_PATH
+
+name: Detect or install AOCC compiler
+new_env_keys:
+- MLC_AOCC_*
+- MLC_C_COMPILER_*
+- MLC_CXX_COMPILER_*
+- MLC_COMPILER_*
+- MLC_LINKER_*
+- + CFLAGS
+- + CXXFLAGS
+- + FFLAGS
+- + LDFLAGS
+- +MLC_HOST_OS_DEFAULT_INCLUDE_PATH
+- +PATH
+post_deps:
+- tags: get,compiler-flags
+sort: 500
+tags:
+- compiler
+- get
+- aocc
+uid: 1ceb0656e99a44ec
diff --git a/script/get-aocc/run.bat b/script/get-aocc/run.bat
new file mode 100644
index 000000000..5a08d5b62
--- /dev/null
+++ b/script/get-aocc/run.bat
@@ -0,0 +1,3 @@
+%MLC_AOCC_BIN_WITH_PATH% --version > tmp-ver.out
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
diff --git a/script/get-aocc/run.sh b/script/get-aocc/run.sh
new file mode 100644
index 000000000..fe19b6c8d
--- /dev/null
+++ b/script/get-aocc/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+aocc_bin=${MLC_AOCC_BIN_WITH_PATH}
+echo "${aocc_bin} --version"
+
+${aocc_bin} --version > tmp-ver.out
+test $? -eq 0 || exit $?
+ +cat tmp-ver.out From b808608410dc119059f43246b5aeba24a860aa4e Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Tue, 18 Feb 2025 18:25:47 +0000 Subject: [PATCH 15/40] Minor fixes to improve submission generation experience (#242) --- script/app-mlperf-inference-mlcommons-python/meta.yaml | 1 + script/app-mlperf-inference/customize.py | 2 +- script/app-mlperf-inference/meta.yaml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml index 011e46b99..0eadfa0ff 100644 --- a/script/app-mlperf-inference-mlcommons-python/meta.yaml +++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml @@ -885,6 +885,7 @@ variations: ENQUEUE_NUM_THREADS: 2 MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 16 MLC_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch + MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> tvm-onnx: diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py index 34bd3e046..ab421f248 100644 --- a/script/app-mlperf-inference/customize.py +++ b/script/app-mlperf-inference/customize.py @@ -254,7 +254,7 @@ def postprocess(i): measurements['starting_weights_filename'] = env.get( 'MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get( 'MLC_ML_MODEL_FILE', measurements.get( - 'starting_weights_filename', ''))) + 'starting_weights_filename', 'TBD'))) measurements['retraining'] = env.get( 'MLC_ML_MODEL_RETRAINING', measurements.get( 'retraining', 'no')) diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index 867b5e6c8..846936234 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -27,7 +27,7 @@ default_env: MLC_MLPERF_RUN_STYLE: test MLC_TEST_QUERY_COUNT: '10' MLC_MLPERF_QUANTIZATION: off - MLC_GET_PLATFORM_DETAILS: yes + MLC_GET_PLATFORM_DETAILS: no env: MLC_MLPERF_PRINT_SUMMARY: "no" From 224cf56d8a2b7aa5b68107ea4432a9ad3a2b21ac Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 19 Feb 2025 01:49:00 +0530 Subject: [PATCH 16/40] Make full the default variation for retinanet dataset (#241) * make full the default variation for retinanet dataset * Test only for 50 samples due to limited space --- script/get-preprocessed-dataset-openimages/meta.yaml | 2 +- script/run-mlperf-inference-app/meta.yaml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/script/get-preprocessed-dataset-openimages/meta.yaml b/script/get-preprocessed-dataset-openimages/meta.yaml index 20583814d..12d907289 100644 --- a/script/get-preprocessed-dataset-openimages/meta.yaml +++ b/script/get-preprocessed-dataset-openimages/meta.yaml @@ -52,7 +52,6 @@ variations: ad: original-dataset: tags: _50 - default: true env: MLC_DATASET_SIZE: '50' group: dataset-count @@ -139,6 +138,7 @@ variations: group: dataset-precision full: group: dataset-count + default: true full,validation: ad: original-dataset: diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml index 1dc6ef242..bf8b741a1 100644 --- a/script/run-mlperf-inference-app/meta.yaml +++ b/script/run-mlperf-inference-app/meta.yaml @@ -383,6 +383,8 @@ variations: add_deps_recursive: submission-checker: tags: _short-run + openimages-preprocessed: + tags: _50 default: 'true' env: MLC_MLPERF_SUBMISSION_DIVISION: open From b5a4fd6504f22b6c8b609387450e304976f14982 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Tue, 18 Feb 2025 21:30:20 +0000 Subject: 
[PATCH 17/40] Fixes for mlperf inference submissions (#243) * Fix for R50 TF run when mobilenet datasets are present in cache * Cleanup run-resnet50 --- .../meta.yaml | 2 +- script/run-all-mlperf-models/run-resnet50.sh | 30 +++++-------------- 2 files changed, 8 insertions(+), 24 deletions(-) diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml index 0eadfa0ff..7ad40f9bc 100644 --- a/script/app-mlperf-inference-mlcommons-python/meta.yaml +++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml @@ -519,7 +519,7 @@ deps: # Install datasets ## ImageNet (small for tests) - - tags: get,dataset,image-classification,imagenet,preprocessed + - tags: get,dataset,image-classification,imagenet,preprocessed,-_for.mobilenet names: - imagenet-preprocessed enable_if_env: diff --git a/script/run-all-mlperf-models/run-resnet50.sh b/script/run-all-mlperf-models/run-resnet50.sh index bd810a277..a2144bf0a 100644 --- a/script/run-all-mlperf-models/run-resnet50.sh +++ b/script/run-all-mlperf-models/run-resnet50.sh @@ -35,6 +35,7 @@ function run_test() { run "$5" } power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +power='' #Add your run commands here... find_performance_cmd='mlcr generate-run-cmds,inference,_find-performance \ @@ -51,37 +52,20 @@ submission_cmd_scenario='mlcr generate-run-cmds,inference,_submission --scenari --category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ --skip_submission_generation=yes --execution_mode=valid $power' -readme_cmd_single='mlcr generate-run-cmds,inference,_populate-readme \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution_mode=valid $power' - -readme_cmd='mlcr generate-run-cmds,inference,_populate-readme,_all-scenarios \ ---model=$model --implementation=$implementation --device=$device --backend=$backend \ ---category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ ---skip_submission_generation=yes --execution_mode=valid $power' # run "$MLC_RUN_CMD" run_test "onnxruntime" "200" "reference" "cpu" "$find_performance_cmd" run_test "tf" "200" "reference" "cpu" "$find_performance_cmd" -run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" -run_test "tf" "20000" "reference" "cuda" "$find_performance_cmd" run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" run_test "tf" "100" "reference" "cpu" "$submission_cmd" scenario="SingleStream" run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc" run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc --adr.mlperf-inference-implementation.compressed_dataset=on" + + +run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" +run_test "tf" "20000" "reference" "cuda" "$find_performance_cmd" run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd " -scenario="Offline" -run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario" -scenario="SingleStream" -run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario" - -run_test "onnxruntime" "100" "reference" "cpu" "$readme_cmd" -run_test "tf" "100" "reference" "cpu" "$readme_cmd" -run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream" -run_test 
"tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream --adr.mlperf-inference-implementation.compressed_dataset=on" -run_test "onnxruntime" "100" "reference" "cuda" "$readme_cmd --scenario=SingleStream" -run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=SingleStream" -run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=Offline" +run_test "tf" "100" "reference" "cuda" "$submission_cmd" + From 4bab9addef93993ed78d07ca422694625c44db6e Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Tue, 18 Feb 2025 22:39:14 +0000 Subject: [PATCH 18/40] Update meta.yaml (#244) --- script/app-mlperf-inference-mlcommons-python/meta.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/script/app-mlperf-inference-mlcommons-python/meta.yaml b/script/app-mlperf-inference-mlcommons-python/meta.yaml index 7ad40f9bc..720d59514 100644 --- a/script/app-mlperf-inference-mlcommons-python/meta.yaml +++ b/script/app-mlperf-inference-mlcommons-python/meta.yaml @@ -1280,6 +1280,7 @@ variations: - protobuf version_max: "4.23.4" version_max_usable: "4.23.4" + version_min: "3.20.3" enable_if_env: MLC_MLPERF_BACKEND: - tf From af51c745ce2a498c95423591e173cc693e60ecba Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 19 Feb 2025 12:28:48 +0530 Subject: [PATCH 19/40] Exit condition provided for commit (#245) --- script/push-mlperf-inference-results-to-github/run.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/script/push-mlperf-inference-results-to-github/run.sh b/script/push-mlperf-inference-results-to-github/run.sh index 53a297cf9..ffac61801 100644 --- a/script/push-mlperf-inference-results-to-github/run.sh +++ b/script/push-mlperf-inference-results-to-github/run.sh @@ -17,6 +17,7 @@ fi test $? -eq 0 || exit $? git commit -a -m "${MLC_MLPERF_RESULTS_REPO_COMMIT_MESSAGE}" +test $? -eq 0 || exit $? 
 echo ${MLC_GIT_PUSH_CMD}
 ${MLC_GIT_PUSH_CMD}

From 86f7e110ce0d28a0c676402733517670548c7641 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 20 Feb 2025 19:24:00 +0000
Subject: [PATCH 20/40] Fixes for mlperf submission (#249)

* Fix python version for bert deepsparse
---
 script/get-onnxruntime-prebuilt/meta.yaml     |  2 +-
 .../run-cpp-implementation.sh                 | 24 +++++++++----------
 .../run-all-mlperf-models/run-pruned-bert.sh  |  4 ++--
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/script/get-onnxruntime-prebuilt/meta.yaml b/script/get-onnxruntime-prebuilt/meta.yaml
index 57078077a..3a3a185a9 100644
--- a/script/get-onnxruntime-prebuilt/meta.yaml
+++ b/script/get-onnxruntime-prebuilt/meta.yaml
@@ -4,7 +4,7 @@ automation_uid: 5b4e0237da074764
 cache: true
 category: AI/ML frameworks
 clean_files: []
-default_version: 1.16.3
+default_version: 1.20.1
 deps:
 - tags: detect,os
 new_env_keys:
diff --git a/script/run-all-mlperf-models/run-cpp-implementation.sh b/script/run-all-mlperf-models/run-cpp-implementation.sh
index 345a57a3d..3d4c7b93d 100644
--- a/script/run-all-mlperf-models/run-cpp-implementation.sh
+++ b/script/run-all-mlperf-models/run-cpp-implementation.sh
@@ -27,9 +27,9 @@ division="closed"
 # run "$MLC_RUN_CMD"
 POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 "
-POWER=""
+POWER=" --env.MLC_GET_PLATFORM_DETAILS=no"
 
-run "mlcr set,system,performance,mode"
+#run "mlcr set,system,performance,mode"
 
 #cpp
 run "mlcr generate-run-cmds,inference,_find-performance \
@@ -48,7 +48,7 @@ run "mlcr 
generate-run-cmds,inference,_submission \
 --execution_mode=valid \
 --skip_submission_generation=yes \
 ${POWER} \
---results_dir=$HOME/results_dir"
+"
 
 #multistream
 run "mlcr generate-run-cmds,inference,_submission \
@@ -150,7 +150,7 @@ run "mlcr generate-run-cmds,inference,_submission \
 --execution_mode=valid \
 --skip_submission_generation=yes \
 ${POWER} \
---results_dir=$HOME/results_dir"
+"
 
 run "mlcr generate-run-cmds,inference,_submission \
 --model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \
@@ -160,4 +160,4 @@ run "mlcr generate-run-cmds,inference,_submission \
 --execution_mode=valid \
 --skip_submission_generation=yes \
 ${POWER} \
---results_dir=$HOME/results_dir"
+"
diff --git a/script/run-all-mlperf-models/run-pruned-bert.sh b/script/run-all-mlperf-models/run-pruned-bert.sh
index f5ed64042..a0b7af75a 100644
--- a/script/run-all-mlperf-models/run-pruned-bert.sh
+++ b/script/run-all-mlperf-models/run-pruned-bert.sh
@@ -46,7 +46,7 @@ scenario="Offline"
 if [[ $scenario == "Offline" ]]; then
 for stub in ${zoo_stub_list[@]}; do
 cmd="mlcr run,mlperf,inference,generate-run-cmds,_find-performance \
-    --adr.python.version_min=3.8 \
+    --adr.python.version=3.9.12 \
     --implementation=reference \
     --model=bert-99 \
     --precision=int8 \
@@ -65,7 +65,7 @@ fi
 for stub in ${zoo_stub_list[@]}; do
   cmd="mlcr run,mlperf,inference,generate-run-cmds \
-    --adr.python.version_min=3.8 \
+    --adr.python.version=3.9.12 \
     --adr.compiler.tags=gcc \
     --implementation=reference \
     --model=bert-99 \
From de26510cf71a1087cb75f990b08de45d8439162a Mon Sep 17 00:00:00 2001
From: Sidharth <75777345+Sid9993@users.noreply.github.com>
Date: Fri, 21 Feb 2025 01:33:20 +0530
Subject: [PATCH 21/40] Cleaned the boolean usage in MLCFlow (#246)

* Cleaned the boolean usage in MLCFlow
* Made the remaining script changes
* Update module.py | fix boolean usage in script module

---------

Co-authored-by: github-actions[bot]
Co-authored-by: Arjun Suresh
---
 automation/script/docker.py | 2 +-
 automation/script/module.py | 37 ++++++++-----------
 .../app-mlperf-inference-nvidia/customize.py | 5 ++-
 script/app-mlperf-inference/customize.py | 4 +-
 script/benchmark-program/customize.py | 7 ++--
 script/download-file/customize.py | 3 +-
 .../customize.py | 5 ++-
 script/run-docker-container/customize.py | 4 +-
 8 files changed, 31 insertions(+), 36 deletions(-)

diff --git a/automation/script/docker.py b/automation/script/docker.py
index 1e762e348..056192898 100644
--- a/automation/script/docker.py
+++ b/automation/script/docker.py
@@ -246,7 +246,7 @@ def docker_run(self_module, i):
         for t in i.get('tags', '').split(",") if t.startswith("_")]
 
     docker_cache = i.get('docker_cache', "yes")
-    if docker_cache.lower() in ["no", "false"]:
+    if is_false(docker_cache):
        env.setdefault('MLC_DOCKER_CACHE', docker_cache)
 
    image_repo = i.get('docker_image_repo', '')
diff --git a/automation/script/module.py b/automation/script/module.py
index 4c3b7fbb1..cadf35608 100644
--- a/automation/script/module.py
+++ b/automation/script/module.py
@@ -358,9 +358,9 @@ def _run(self, i):
         if fake_deps:
             env['MLC_TMP_FAKE_DEPS'] = 'yes'
 
-        if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']:
+        if is_true(i.get('skip_sys_utils', '')):
             env['MLC_SKIP_SYS_UTILS'] = 'yes'
-        if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']:
+        if is_true(i.get('skip_sudo', '')):
             env['MLC_TMP_SKIP_SUDO'] = 'yes'
 
         run_state = i.get('run_state', self.run_state)
@@ -374,12 +374,10 @@ def _run(self, i):
 
         # Check verbose and silent
         verbose = False
-        silent = True if str(i.get('silent', '')).lower() in [
-            'true', 'yes', 'on'] else False
+        silent = True if is_true(i.get('silent', '')) else False
 
         if not silent:
-            silent = True if str(i.get('s', '')).lower() in [
-                'true', 'yes', 'on'] else False
+            silent = True if is_true(i.get('s', '')) else False
 
         if silent:
             if 'verbose' in i:
@@ -1020,11 +1018,9 @@ def _run(self, i):
             if r['return'] > 0:
                 return r
 
-        if str(env.get('MLC_RUN_STATE_DOCKER', False)
-               ).lower() in ['true', '1', 'yes']:
+        if is_true(env.get('MLC_RUN_STATE_DOCKER', False)):
             if state.get('docker'):
-                if str(state['docker'].get('run', True)
-                       ).lower() in ['false', '0', 'no']:
+                if is_false(state['docker'].get('run', True)):
                     logger.info(
                         recursion_spaces +
                         ' - Skipping script::{} run as we are inside docker'.format(found_script_item))
@@ -1047,7 +1043,7 @@ def _run(self, i):
                         'deps': []}
                     return rr
 
-            elif str(state['docker'].get('real_run', True)).lower() in ['false', '0', 'no']:
+            elif is_false(state['docker'].get('real_run', True)):
                 logger.info(
                     recursion_spaces +
                     ' - Doing fake run for script::{} as we are inside docker'.format(found_script_item))
@@ -1576,7 +1572,7 @@ def _run(self, i):
             }
 
         # Check and run predeps in customize.py
-        if str(meta.get('predeps', 'True')).lower() not in ["0", "false", "no"] and os.path.isfile(
+        if not is_false(meta.get('predeps', 'True')) and os.path.isfile(
                 path_to_customize_py):  # possible duplicate execution - needs fix
             r = utils.load_python_module(
                 {'path': path, 'name': 'customize'})
@@ -2962,13 +2958,10 @@ def test(self, i):
                     run_variations = [
                         f"_{v}" for v in variations if variations[v].get(
                             'group',
-                            '') == '' and str(
+                            '') == '' and not is_true(
                             variations[v].get(
                                 'exclude-in-test',
-                                '')).lower() not in [
-                            "1",
-                            "true",
-                            "yes"]]
+                                ''))]
                 else:
                     given_variations = run_input.get(
                         'variations_list', [])
@@ -5029,7 +5022,7 @@ def enable_or_skip_script(meta, env):
     """
 
     if not isinstance(meta, dict):
-        logger.info(
+        logger.warn(
            "The meta entry is not a dictionary for skip/enable if_env: %s",
            meta)
 
@@ -5039,10 +5032,10 @@ def enable_or_skip_script(meta, env):
             value = str(env[key]).lower().strip()
             if set(meta_key) & set(["yes", "on", "true", "1"]):
                 # Any set value other than false is taken as set
-                if value not in ["no", "off", "false", "0", ""]:
+                if not is_false(value) and value != '':
                     continue
             elif set(meta_key) & set(["no", "off", "false", "0"]):
-                if value in ["no", "off", "false", "0", ""]:
+                if is_false(value) or value == "":
                     continue
             elif value in meta_key:
                 continue
@@ -5072,10 +5065,10 @@ def any_enable_or_skip_script(meta, env):
             meta_key = [str(v).lower() for v in meta[key]]
 
             if set(meta_key) & set(["yes", "on", "true", "1"]):
-                if value not in ["no", "off", "false", "0", ""]:
+                if not is_false(value) and value != "":
                     found = True
             elif set(meta_key) & set(["no", "off", "false", "0", ""]):
-                if value in ["no", "off", "false", "0", ""]:
+                if is_false(value) or value == "":
                     found = True
             elif value in meta_key:
                 found = True
diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py
index 550b273fe..70e5d3710 100644
--- a/script/app-mlperf-inference-nvidia/customize.py
+++ b/script/app-mlperf-inference-nvidia/customize.py
@@ -1,6 +1,7 @@
 from mlc import utils
 import os
 import shutil
+from utils import *
 
 
 def preprocess(i):
@@ -590,8 +591,8 @@ def preprocess(i):
 
         run_infer_on_copy_streams = str(
             env.get('MLC_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', ''))
-        if run_infer_on_copy_streams and run_infer_on_copy_streams.lower() not in [
-                "no", "false", "0", ""]:
+        if run_infer_on_copy_streams and not is_false(
+                run_infer_on_copy_streams):
             run_config += " --run_infer_on_copy_streams"
 
         start_from_device = str(
diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py
index ab421f248..5a215d27b 100644
--- a/script/app-mlperf-inference/customize.py
+++ b/script/app-mlperf-inference/customize.py
@@ -10,6 +10,7 @@ import mlperf_utils
 import re
 from datetime import datetime, timezone
+from utils import *
 
 
 def preprocess(i):
@@ -286,8 +287,8 @@ def postprocess(i):
                 state['app_mlperf_inference_log_summary'][y[0].strip().lower()
                                                           ] = y[1].strip()
 
-    if env.get("MLC_MLPERF_PRINT_SUMMARY", "").lower() not in [
-            "no", "0", "false"]:
+    if not is_false(env.get("MLC_MLPERF_PRINT_SUMMARY", "")):
         print("\n")
         print(mlperf_log_summary)
diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py
index 25b7a17e3..a355e8248 100644
--- a/script/benchmark-program/customize.py
+++ b/script/benchmark-program/customize.py
@@ -1,5 +1,6 @@
 from mlc import utils
 import os
+from utils import *
 
 
 def preprocess(i):
@@ -20,7 +21,7 @@ def preprocess(i):
             env['MLC_RUN_CMD'] += ' ' + env['MLC_RUN_SUFFIX']
 
     else:
-        if env['MLC_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]:
+        if is_true(env['MLC_ENABLE_NUMACTL']):
             env['MLC_ENABLE_NUMACTL'] = "1"
             MLC_RUN_PREFIX = "numactl " + env['MLC_NUMACTL_MEMBIND'] + ' '
         else:
@@ -49,8 +50,8 @@ def preprocess(i):
     if x != '':
         env['MLC_RUN_CMD'] = x + ' ' + env.get('MLC_RUN_CMD', '')
 
-    if os_info['platform'] != 'windows' and str(
-            env.get('MLC_SAVE_CONSOLE_LOG', True)).lower() not in ["no", "false", "0"]:
+    if os_info['platform'] != 'windows' and not is_false(
+            env.get('MLC_SAVE_CONSOLE_LOG', True)):
         logs_dir = env.get('MLC_LOGS_DIR', env['MLC_RUN_DIR'])
         env['MLC_RUN_CMD'] += r" 2>&1 | tee " + q + os.path.join(
             logs_dir, "console.out") + q + r"; echo \${PIPESTATUS[0]} > exitstatus"
diff --git a/script/download-file/customize.py b/script/download-file/customize.py
index f72034d5f..64066122f 100644
--- a/script/download-file/customize.py
+++ b/script/download-file/customize.py
@@ -85,8 +85,7 @@ def preprocess(i):
     extra_download_options = env.get('MLC_DOWNLOAD_EXTRA_OPTIONS', '')
 
     verify_ssl = env.get('MLC_VERIFY_SSL', "True")
-    if str(verify_ssl).lower() in [
-            "no", "false"] or os_info['platform'] == 'windows':
+    if is_false(verify_ssl) or os_info['platform'] == 'windows':
         verify_ssl = False
     else:
         verify_ssl = True
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index 4863aa494..ec1a5fab7 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -4,6 +4,7 @@ import os
 import shutil
 import subprocess
 import sys
+from utils import *
 
 
 def preprocess(i):
@@ -112,8 +113,8 @@ def preprocess(i):
                 env['MLC_MLPERF_USE_MAX_DURATION'] = 'no'
             elif scenario == "MultiStream" and (1000 / float(value) * 660 < 662):
                 env['MLC_MLPERF_USE_MAX_DURATION'] = 'no'
-    if env.get('MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"] and env.get(
-            'MLC_MLPERF_USE_MAX_DURATION', "yes").lower() not in ["no", "false", "0"]:
+    if not is_true(env.get('MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')) and not is_false(env.get(
+            'MLC_MLPERF_USE_MAX_DURATION', "yes")):
         tolerance = 0.4  # much lower because we have max_duration
     else:
         tolerance = 0.9
diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py
index a504ffd07..02ef20fd0 100644
--- a/script/run-docker-container/customize.py
+++ b/script/run-docker-container/customize.py
@@ -185,8 +185,8 @@ def postprocess(i):
     if is_true(env.get('MLC_DOCKER_USE_GOOGLE_DNS', '')):
         run_opts += ' --dns 8.8.8.8 --dns 8.8.4.4 '
 
-    if env.get('MLC_CONTAINER_TOOL', '') == 'podman' and env.get(
-            'MLC_PODMAN_MAP_USER_ID', '').lower() not in ["no", "0", "false"]:
+    if env.get('MLC_CONTAINER_TOOL', '') == 'podman' and not is_false(env.get(
+            'MLC_PODMAN_MAP_USER_ID', '')):
         run_opts += " --userns=keep-id"
 
     if env.get('MLC_DOCKER_PORT_MAPS', []):
From e8b6d474631161f6a63ab3d907eece02a7bf81cd Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 21 Feb 2025 01:27:01 +0000
Subject: [PATCH 22/40] Update build_wheel.yml

---
 .github/workflows/build_wheel.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml
index ba82117cf..1edf42589 100644
--- a/.github/workflows/build_wheel.yml
+++ b/.github/workflows/build_wheel.yml
@@ -6,7 +6,7 @@ on:
   push:
     branches:
-      - dev
+      - dev_off
     paths:
       - VERSION
From 9934345d55a359547da92645ef06863df8ae4936 Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Fri, 21 Feb 2025 15:29:39 +0530
Subject: [PATCH 23/40] Map rocm and gpu to cuda (#251)

* Map rocm and gpu to cuda

---------

Co-authored-by: github-actions[bot]
---
 script/app-mlperf-inference-mlcommons-python/customize.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py
index 80eb8ba57..bbdff3f7c 100644
--- a/script/app-mlperf-inference-mlcommons-python/customize.py
+++ b/script/app-mlperf-inference-mlcommons-python/customize.py
@@ -219,6 +219,9 @@ def get_run_cmd(os_info, env, scenario_extra_options,
 def get_run_cmd_reference(
         os_info, env, scenario_extra_options, mode_extra_options, dataset_options):
 
+    device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] not in [
+        "gpu", "rocm"] else "cuda"
+
     if env['MLC_MODEL'] in ["gptj-99", "gptj-99.9"]:
 
         env['RUN_DIR'] = os.path.join(
@@ -352,8 +355,6 @@ def get_run_cmd_reference(
                 "fid"))
 
         backend = env['MLC_MLPERF_BACKEND']
-        device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] not in [
-            "gpu", "rocm"] else "cuda"
         max_batchsize = env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1')
         cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
             " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
@@ -377,7 +378,6 @@ def get_run_cmd_reference(
             "language", "llama2-70b")
 
         backend = env['MLC_MLPERF_BACKEND']
-        device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda"
 
         cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
             " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
@@ -410,7 +410,6 @@ def get_run_cmd_reference(
             "language", "mixtral-8x7b")
         backend = env['MLC_MLPERF_BACKEND']
-        device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda"
         cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
             " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
             " --dataset-path " + env['MLC_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \
@@ -489,7 +488,6 @@ def get_run_cmd_reference(
         else:
             mode_extra_options += " --dataset igbh-dgl-tiny --profile debug-dgl "
 
-        device = env['MLC_MLPERF_DEVICE'] if env['MLC_MLPERF_DEVICE'] != "gpu" else "cuda"
         # have to add the condition for running in debug mode or real run mode
         cmd = env['MLC_PYTHON_BIN_WITH_PATH'] + " main.py " \
             " --scenario " + env['MLC_MLPERF_LOADGEN_SCENARIO'] + \
From 1c6fda2c25d02377f26b3c339abea19b98ef2860 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 21 Feb 2025 22:02:49 +0000
Subject: [PATCH 24/40] Fixes get,igbh,dataset on host (#252)

* Support docker script for mobilenet models
* Added torch deps for igbh

---
 script/get-dataset-igbh/meta.yaml | 3 +-
 .../meta.yaml | 36 ++++++++++++++-----
 2 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/script/get-dataset-igbh/meta.yaml b/script/get-dataset-igbh/meta.yaml
index b48e7b42c..2f6dc96ad 100644
--- a/script/get-dataset-igbh/meta.yaml
+++ b/script/get-dataset-igbh/meta.yaml
@@ -24,9 +24,10 @@ deps:
   - tags: get,python
     names:
     - get-python
-  - tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/gateoverflow/IGB-Datasets.git
+  - tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/IllinoisGraphBenchmark/IGB-Datasets
   - tags: get,generic-python-lib,_package.colorama
   - tags: get,generic-python-lib,_package.tqdm
+  - tags: get,generic-python-lib,_package.torch
 
 prehook_deps:
diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml
index fd4cf3468..93d8ecc17 100644
--- a/script/run-mlperf-inference-mobilenet-models/meta.yaml
+++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml
@@ -8,22 +8,42 @@ default_env:
   MLC_MLPERF_RUN_FP32: 'yes'
   MLC_MLPERF_RUN_INT8: 'yes'
   MLC_MLPERF_RUN_MOBILENETS: 'no'
+  MLC_USE_DATASET_FROM_HOST: 'yes'
 
 deps:
   - tags: get,sys-utils-mlc
 
 docker:
+  deps:
+    - tags: get,mlperf,inference,results,dir,local
+      names:
+        - get-mlperf-inference-results-dir
+      skip_if_env:
+        OUTPUT_BASE_DIR: [ on ]
+    - tags: get,mlperf,inference,submission,dir,local
+      names:
+        - get-mlperf-inference-submission-dir
+      skip_if_env:
+        MLC_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ]
+    - tags: get,dataset,imagenet,validation,original,_full
+      enable_if_env:
+        MLC_USE_DATASET_FROM_HOST:
+          - 'yes'
+      names:
+        - imagenet-original
+        - dataset-original
   input_mapping:
-    imagenet_path: IMAGENET_PATH
-    results_dir: RESULTS_DIR
-    submission_dir: SUBMISSION_DIR
+    imagenet_path: MLC_DATASET_IMAGENET_PATH
+    results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR
+    submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
   docker_run_final_cmds:
-    - mlcr run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True
-      --adr.compiler.tags=gcc
+    - mlcr run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True --adr.compiler.tags=gcc
   fake_run_deps: false
   mounts:
-    - ${{ IMAGENET_PATH }}:${{ IMAGENET_PATH }}
-    - ${{ RESULTS_DIR }}:${{ RESULTS_DIR }}
-    - ${{ SUBMISSION_DIR }}:${{ SUBMISSION_DIR }}
+    - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}"
+    - "${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}"
+    - "${{ OUTPUT_BASE_DIR }}:${{ OUTPUT_BASE_DIR }}"
+    - "${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}"
   run: true
+  docker_it: true
 
 input_mapping:
   find-performance: MLC_MLPERF_FIND_PERFORMANCE_MODE
   imagenet_path: IMAGENET_PATH
From c7af0d3f04aa09d74729a8e73fe9d55317a91924 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Fri, 21 Feb 2025 22:47:42 +0000
Subject: [PATCH 25/40] Update customize.py | Fix boolean value for --compliance (#254)

---
 script/run-mlperf-inference-app/customize.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py
index 5c3932a3d..3d1886e1a 100644
--- a/script/run-mlperf-inference-app/customize.py
+++ b/script/run-mlperf-inference-app/customize.py
@@ -305,7 +305,7 @@ def preprocess(i):
     if state.get('docker', {}):
         del (state['docker'])
 
-    if env.get("MLC_MLPERF_LOADGEN_COMPLIANCE", "") == "yes":
+    if is_true(env.get("MLC_MLPERF_LOADGEN_COMPLIANCE", "")):
         for test in test_list:
             env['MLC_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test
             env['MLC_MLPERF_LOADGEN_MODE'] = "compliance"
From 3a9fcb1c0e2371b5394046e8122f108717e7b53f Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sat, 22 Feb 2025 11:24:41 +0000
Subject: [PATCH 26/40] Fix for no-cache in run-mobilenets (#256)

* Fix no cache issue for mobilenet
* Added test for mobilenet

---
 .../run-mlperf-inference-mobilenet-models/customize.py | 1 +
 script/run-mlperf-inference-mobilenet-models/meta.yaml | 10 ++++++++++
 2 files changed, 11 insertions(+)

diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py
index 6641448e2..4e0641637 100644
--- a/script/run-mlperf-inference-mobilenet-models/customize.py
+++ b/script/run-mlperf-inference-mobilenet-models/customize.py
@@ -17,6 +17,7 @@ def preprocess(i):
     adr = i['input'].get('adr')
 
     automation = i['automation']
+    mlc = i['automation'].action_object
 
     quiet = (env.get('MLC_QUIET', False) == 'yes')
     verbose = (env.get('MLC_VERBOSE', False) == 'yes')
diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml
index 93d8ecc17..b1d2b3f0c 100644
--- a/script/run-mlperf-inference-mobilenet-models/meta.yaml
+++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml
@@ -152,3 +152,13 @@ variations:
     group: optimization
   use-neon:
     alias: neon
+tests:
+  run_inputs:
+  - env:
+      MLC_TEST_ONE_RUN: 'yes'
+    variations_list:
+    - tflite
+    - accuracy_only
+    adr:
+      compiler:
+        tags: gcc
From 1879acd3bc8659c72eee5850064ec9f164654429 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sat, 22 Feb 2025 13:13:02 +0000
Subject: [PATCH 27/40] Added alternative download link for imagenet-aux (#257)

---
 script/get-dataset-imagenet-aux/meta.yaml | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/script/get-dataset-imagenet-aux/meta.yaml b/script/get-dataset-imagenet-aux/meta.yaml
index 00036303f..0863b982d 100644
--- a/script/get-dataset-imagenet-aux/meta.yaml
+++ b/script/get-dataset-imagenet-aux/meta.yaml
@@ -30,10 +30,19 @@ variations:
   '2012':
     env:
       MLC_DATASET_AUX_VER: '2012'
-  from.berkeleyvision:
+  from.go:
     base:
     - '2012'
     default: true
+    env:
+      MLC_DOWNLOAD_CHECKSUM: ee346d67141e476df9c1a3f813552503
+      MLC_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503
+      MLC_PACKAGE_URL: https://armi.in/files/caffe_ilsvrc12.tar.gz
+      MLC_PACKAGE_URL1: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz
+    group: download-source
+  from.berkeleyvision:
+    base:
+    - '2012'
     env:
       MLC_DOWNLOAD_CHECKSUM: f963098ea0e785a968ca1eb634003a90
       MLC_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503
From f8e76e5bbe576c7f2f06e50235f742c50c09a669 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 23 Feb 2025 16:35:09 +0000
Subject: [PATCH 28/40] Code cleanup for mobilenet runs

* Added alternative download link for imagenet-aux
* Clean-up install-python-src meta
* Update mobilenet docker meta
* Update meta.yaml
* Fix interactive for mobilenet docker
* Removed mlc import in mobilenet-models
* Added print for rm cache in mlperf-inference-mobilenet-models

---
 .github/workflows/build_wheel.yml | 1 +
 script/install-python-src/meta.yaml | 4 ++
 .../customize.py | 37 +++++++++++--------
 .../meta.yaml | 10 +++--
 4 files changed, 34 insertions(+), 18 deletions(-)

diff --git a/.github/workflows/build_wheel.yml b/.github/workflows/build_wheel.yml
index 1edf42589..8de31237d 100644
--- a/.github/workflows/build_wheel.yml
+++ b/.github/workflows/build_wheel.yml
@@ -10,6 +10,7 @@ on:
     paths:
     - VERSION
 
+
 jobs:
   build_wheels:
diff --git a/script/install-python-src/meta.yaml b/script/install-python-src/meta.yaml
index 99ed1df15..f2e6e533f 100644
--- a/script/install-python-src/meta.yaml
+++ b/script/install-python-src/meta.yaml
@@ -20,6 +20,9 @@ deps:
     MLC_HOST_OS_FLAVOR:
     - ubuntu
 - tags: get,generic-sys-util,_libssl-dev
+  enable_if_env:
+    MLC_ENABLE_SSL:
+    - 'yes'
 - enable_if_env:
     MLC_HOST_OS_FLAVOR:
     - ubuntu
@@ -84,3 +87,4 @@ variations:
       MLC_ENABLE_SSL: 'yes'
       MLC_PYTHON_INSTALL_CACHE_TAGS: with-ssl
     group: ssl
+    default: true
diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py
index 4e0641637..0d50b0c37 100644
--- a/script/run-mlperf-inference-mobilenet-models/customize.py
+++ b/script/run-mlperf-inference-mobilenet-models/customize.py
@@ -1,7 +1,7 @@
 from mlc import utils
 import os
-import mlc
 import sys
+from utils import *
 
 
 def preprocess(i):
@@ -122,10 +122,18 @@ def preprocess(i):
     implementation_tags_string = ",".join(implementation_tags)
 
     inp = i['input']
+    clean_input = {
+        'action': 'rm',
+        'target': 'cache',
+        'tags': 'get,preprocessed,dataset,_for.mobilenet',
+        'quiet': True,
+        'v': verbose,
+        'f': True
+    }
 
-    for model in variation_strings:
-        for v in variation_strings[model]:
-            for precision in precisions:
+    for precision in precisions:
+        for model in variation_strings:
+            for v in variation_strings[model]:
 
                 if "small-minimalistic" in v and precision == "uint8":
                     continue
@@ -195,17 +203,16 @@ def preprocess(i):
         if env.get('MLC_TEST_ONE_RUN', '') == "yes":
             return {'return': 0}
 
-    clean_input = {
-        'action': 'rm',
-        'automation': 'cache',
-        'tags': 'get,preprocessed,dataset,_for.mobilenet',
-        'quiet': True,
-        'v': verbose,
-        'f': True
-    }
-    r = mlc.access(clean_input)
-    # if r['return'] > 0:
-    # return r
+    if is_true(env.get('MLC_MINIMIZE_DISK_SPACE', '')):
+        r = mlc.access(clean_input)
+        if r['return'] > 0:
+            print(r)
+            # return r
+
+    r = mlc.access(clean_input)
+    if r['return'] > 0:
+        print(r)
+        # return r
 
     return {'return': 0}
diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml
index b1d2b3f0c..e2c9526fb 100644
--- a/script/run-mlperf-inference-mobilenet-models/meta.yaml
+++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml
@@ -34,16 +34,20 @@ docker:
     imagenet_path: MLC_DATASET_IMAGENET_PATH
     results_dir: MLC_MLPERF_INFERENCE_RESULTS_DIR
     submission_dir: MLC_MLPERF_INFERENCE_SUBMISSION_DIR
-  docker_run_final_cmds:
-    - mlcr run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True --adr.compiler.tags=gcc
   fake_run_deps: false
+  pre_run_cmds:
+    - mlc pull repo
   mounts:
     - "${{ MLC_DATASET_IMAGENET_PATH }}:${{ MLC_DATASET_IMAGENET_PATH }}"
     - "${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}:${{ MLC_MLPERF_INFERENCE_RESULTS_DIR }}"
    - "${{ OUTPUT_BASE_DIR }}:${{ OUTPUT_BASE_DIR }}"
     - "${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}"
   run: true
-  docker_it: true
+  interactive: true
+  mlc_repo: mlcommons@mlperf-automations
+  mlc_repo_branch: dev
+  real_run: False
+  os_version: '22.04'
 
 input_mapping:
   find-performance: MLC_MLPERF_FIND_PERFORMANCE_MODE
   imagenet_path: IMAGENET_PATH
From d8bf122f25afd0973840849987e550868fdb2b36 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sun, 23 Feb 2025 22:11:45 +0000
Subject: [PATCH 29/40] Make low disk usage the default in mobilenet run (#264)

---
 .../customize.py | 28 +++++++++----------
 .../meta.yaml | 1 +
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py
index 0d50b0c37..2588aca8e 100644
--- a/script/run-mlperf-inference-mobilenet-models/customize.py
+++ b/script/run-mlperf-inference-mobilenet-models/customize.py
@@ -50,18 +50,18 @@ def preprocess(i):
     }
 
     models = {}
-    if env.get('MLC_MLPERF_RUN_MOBILENET_V1', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_RUN_MOBILENET_V1', '')):
         models['mobilenet'] = {}
         models['mobilenet']['v1'] = models_all['mobilenet']['v1']
-    elif env.get('MLC_MLPERF_RUN_MOBILENET_V2', '') == "yes":
+    elif is_true(env.get('MLC_MLPERF_RUN_MOBILENET_V2', '')):
         models['mobilenet'] = {}
         models['mobilenet']['v2'] = models_all['mobilenet']['v2']
-    elif env.get('MLC_MLPERF_RUN_MOBILENET_V3', '') == "yes":
+    elif is_true(env.get('MLC_MLPERF_RUN_MOBILENET_V3', '')):
         models['mobilenet'] = {}
         models['mobilenet']['v3'] = models_all['mobilenet']['v3']
-    elif env.get('MLC_MLPERF_RUN_MOBILENETS', '') == "yes":
+    elif is_true(env.get('MLC_MLPERF_RUN_MOBILENETS', '')):
         models['mobilenet'] = models_all['mobilenet']
-    elif env.get('MLC_MLPERF_RUN_EFFICIENTNETS', '') == "yes":
+    elif is_true(env.get('MLC_MLPERF_RUN_EFFICIENTNETS', '')):
         models['efficientnet'] = models_all['efficientnet']
 
     variation_strings = {}
@@ -90,16 +90,16 @@ def preprocess(i):
                     variation_list.append("_" + k3)
                 variation_strings[t1].append(",".join(variation_list))
 
-    if env.get('MLC_MLPERF_SUBMISSION_MODE', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_SUBMISSION_MODE', '')):
         var = "_submission"
         execution_mode = "valid"
-    elif env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes" and env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes":
+    elif is_true(env.get('MLC_MLPERF_ACCURACY_MODE', '')) and is_true(env.get('MLC_MLPERF_PERFORMANCE_MODE', '')):
        var = "_full,_performance-and-accuracy"
         execution_mode = "valid"
-    elif env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes":
+    elif is_true(env.get('MLC_MLPERF_ACCURACY_MODE', '')):
         var = "_full,_accuracy-only"
         execution_mode = "valid"
-    elif env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes":
+    elif is_true(env.get('MLC_MLPERF_PERFORMANCE_MODE', '')):
         var = "_full,_performance-only"
         execution_mode = "valid"
     else:
@@ -178,21 +178,21 @@ def preprocess(i):
     if env.get('MLC_MLPERF_INFERENCE_SUBMISSION_DIR', '') != '':
         mlc_input['submission_dir'] = env['MLC_MLPERF_INFERENCE_SUBMISSION_DIR']
 
-    if env.get('MLC_MLPERF_FIND_PERFORMANCE_MODE', '') == "yes" and env.get(
-            'MLC_MLPERF_NO_RERUN', '') != 'yes':
+    if is_true(env.get('MLC_MLPERF_FIND_PERFORMANCE_MODE', '')) and not is_true(env.get(
+            'MLC_MLPERF_NO_RERUN', '')):
         mlc_input['rerun'] = True
 
-    if env.get('MLC_MLPERF_POWER', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_POWER', '')):
         mlc_input['power'] = 'yes'
 
-    if env.get('MLC_MLPERF_ACCURACY_MODE', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_ACCURACY_MODE', '')):
         mlc_input['mode'] = 'accuracy'
         print(mlc_input)
         r = mlc.access(mlc_input)
         if r['return'] > 0:
             return r
 
-    if env.get('MLC_MLPERF_PERFORMANCE_MODE', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_PERFORMANCE_MODE', '')):
         mlc_input['mode'] = 'performance'
 
         print(mlc_input)
         r = mlc.access(mlc_input)
         if r['return'] > 0:
             return r
diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml
index e2c9526fb..f45ab7c64 100644
--- a/script/run-mlperf-inference-mobilenet-models/meta.yaml
+++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml
@@ -9,6 +9,7 @@ default_env:
   MLC_MLPERF_RUN_INT8: 'yes'
   MLC_MLPERF_RUN_MOBILENETS: 'no'
   MLC_USE_DATASET_FROM_HOST: 'yes'
+  MLC_MINIMIZE_DISK_SPACE: 'yes'
 deps:
 - tags: get,sys-utils-mlc
 docker:
From d8cae3a662a931ccddc8ed384b26caad43a1f954 Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Mon, 24 Feb 2025 15:01:55 +0530
Subject: [PATCH 30/40] Add script to download waymo calibration dataset (#265)

---
 .../COPYRIGHT.md | 9 +++++
 .../customize.py | 35 +++++++++++++++++++
 .../get-dataset-waymo-calibration/meta.yaml | 35 +++++++++++++++++++
 .../run-rclone.sh | 4 +++
 4 files changed, 83 insertions(+)
 create mode 100644 script/get-dataset-waymo-calibration/COPYRIGHT.md
 create mode 100644 script/get-dataset-waymo-calibration/customize.py
 create mode 100644 script/get-dataset-waymo-calibration/meta.yaml
 create mode 100644 script/get-dataset-waymo-calibration/run-rclone.sh

diff --git a/script/get-dataset-waymo-calibration/COPYRIGHT.md b/script/get-dataset-waymo-calibration/COPYRIGHT.md
new file mode 100644
index 000000000..d2ceead84
--- /dev/null
+++ b/script/get-dataset-waymo-calibration/COPYRIGHT.md
@@ -0,0 +1,9 @@
+# Copyright Notice
+
+© 2025-2026 MLCommons. All Rights Reserved.
+
+This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at:
+
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License.
diff --git a/script/get-dataset-waymo-calibration/customize.py b/script/get-dataset-waymo-calibration/customize.py
new file mode 100644
index 000000000..e404aa512
--- /dev/null
+++ b/script/get-dataset-waymo-calibration/customize.py
@@ -0,0 +1,35 @@
+from mlc import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    if os_info['platform'] == "windows":
+        return {'return': 1, 'error': 'Script not supported in windows yet!'}
+
+    if env.get('MLC_DATASET_WAYMO_CALIBRATION_PATH', '') != '':
+        if not os.path.exists(env['MLC_DATASET_WAYMO_CALIBRATION_PATH']):
+            return {
+                'return': 1, 'error': f"Path {env['MLC_DATASET_WAYMO_CALIBRATION_PATH']} does not exist!"}
+    else:
+        env['MLC_TMP_REQUIRE_DOWNLOAD'] = "yes"
+        if env['MLC_DOWNLOAD_SRC'] == "mlcommons":
+            i['run_script_input']['script_name'] = 'run-rclone'
+        if env.get('MLC_OUTDIRNAME', '') != '':
+            env['MLC_DATASET_WAYMO_CALIBRATION_PATH'] = env['MLC_OUTDIRNAME']
+        else:
+            env['MLC_DATASET_WAYMO_CALIBRATION_PATH'] = os.path.join(
+                os.getcwd(), "kitti_format", "calibration")
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return': 0}
diff --git a/script/get-dataset-waymo-calibration/meta.yaml b/script/get-dataset-waymo-calibration/meta.yaml
new file mode 100644
index 000000000..dbc17d494
--- /dev/null
+++ b/script/get-dataset-waymo-calibration/meta.yaml
@@ -0,0 +1,35 @@
+alias: get-dataset-waymo-calibration
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+input_mapping:
+  waymo_calibration_path: MLC_DATASET_WAYMO_CALIBRATION_PATH
+new_env_keys:
+- MLC_DATASET_WAYMO_CALIBRATION_PATH
+tags:
+- get
+- waymo
+- dataset
+- calibration
+uid: 59d3a8d48d5e4767
+variations:
+  kitti_format:
+    default: true
+    env:
+      MLC_DATASET_WAYMO_FORMAT: kitti
+    group: dataset-format
+  mlc:
+    default: true
+    env:
+      MLC_DOWNLOAD_SRC: mlcommons
+    group: download-src
+    prehook_deps:
+    - enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - true
+      tags: get,rclone
+    - enable_if_env:
+        MLC_TMP_REQUIRE_DOWNLOAD:
+        - true
+      force_cache: true
+      tags: get,rclone-config,_waymo
diff --git a/script/get-dataset-waymo-calibration/run-rclone.sh b/script/get-dataset-waymo-calibration/run-rclone.sh
new file mode 100644
index 000000000..fd289eff4
--- /dev/null
+++ b/script/get-dataset-waymo-calibration/run-rclone.sh
@@ -0,0 +1,4 @@
+cmd="rclone sync mlc-waymo:waymo_preprocessed_dataset/kitti_format/testing ${MLC_DATASET_WAYMO_CALIBRATION_PATH} -P"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
\ No newline at end of file
From 3ea96dd8f8df88c2ca587a8c09ff7aa9cdb91227 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 24 Feb 2025 09:32:48 +0000
Subject: [PATCH 31/40] Fixes for mobilenet run (#266)

* Fix docker user for mobilenet
* Fix mobilenet run script

---
 .../run-mobilenet-models.sh | 20 ++++++++-----------
 .../meta.yaml | 1 +
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh
index a268c49b1..9ad2bc7cd 100644
--- a/script/run-all-mlperf-models/run-mobilenet-models.sh
+++ b/script/run-all-mlperf-models/run-mobilenet-models.sh
@@ -32,24 +32,20 @@ extra_tags=""
 #Add your run commands here...
 # run "$MLC_RUN_CMD"
-run "mlcr run,mobilenet-models,_tflite,_accuracy-only$extra_tags \
+run "mlcr run,mobilenet-models,_tflite$extra_tags \
 --adr.compiler.tags=gcc \
 ${extra_option} "
 
-run "mlcr run,mobilenet-models,_tflite,_performance-only$extra_tags \
-${POWER} \
+run "mlcr run,mobilenet-models,_tflite,_armnn,_neon$extra_tags \
 --adr.compiler.tags=gcc \
-${extra_option}"
-
+${extra_option} "
 
-run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \
+extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"
+run "mlcr run,mobilenet-models,_tflite$extra_tags \
 --adr.compiler.tags=gcc \
 ${extra_option} "
 
-run "mlcr run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \
-${POWER} \
-${extra_option} \
---adr.compiler.tags=gcc"
-
-
+run "mlcr run,mobilenet-models,_tflite,_armnn,_neon$extra_tags \
+--adr.compiler.tags=gcc \
+${extra_option} "
diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml
index f45ab7c64..a6a42760a 100644
--- a/script/run-mlperf-inference-mobilenet-models/meta.yaml
+++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml
@@ -45,6 +45,7 @@ docker:
   - "${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ MLC_MLPERF_INFERENCE_SUBMISSION_DIR }}"
   run: true
   interactive: true
+  user: mlcuser
   mlc_repo: mlcommons@mlperf-automations
   mlc_repo_branch: dev
   real_run: False
From 5df666a5523db20b01ca2d8f002083aeb1feb229 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 24 Feb 2025 13:26:18 +0000
Subject: [PATCH 32/40] Update getting-started.md

---
 docs/getting-started.md | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/docs/getting-started.md b/docs/getting-started.md
index 2bf8ff5b2..248c604bc 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -1,5 +1,12 @@
 # Getting Started with MLC Script Automation
 
+## Install MLC Scripts
+```
+pip install mlc-scripts
+```
+
+For a more customized installation, see [here](https://docs.mlcommons.org/mlcflow/install/)
+
 ## Running MLC Scripts
 
 To execute a simple script in MLC that captures OS details, use the following command:
@@ -12,7 +19,6 @@ mlcr detect,os -j
 This command gathers details about the system on which it's run, such as:
 
 ```json
-$ mlcr detect,os -j
 [2025-02-03 04:57:23,449 main.py:694 INFO] - Repos path for Index: /home/arjun/MLC/repos
 [2025-02-03 04:57:24,167 main.py:837 INFO] - Shared index for script saved to /home/arjun/MLC/repos/index_script.json.
 [2025-02-03 04:57:24,167 main.py:837 INFO] - Shared index for cache saved to /home/arjun/MLC/repos/index_cache.json.
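
Several patches in this series (21, 25, 29, 35 and 38 among them) replace ad-hoc string comparisons such as `env.get(...).lower() in ["yes", "1", "true"]` with `is_true`/`is_false` helpers brought in via `from utils import *`. For reference, here is a minimal sketch of the semantics those hunks assume, derived only from the checks they replace; the actual implementation in the repository's `utils` module may differ in detail:

```python
# Hedged sketch of the truthiness helpers assumed by the patches above.
# Derived from the string sets they replace ("yes"/"on"/"true"/"1" and
# "no"/"off"/"false"/"0" in module.py); not the verbatim repo code.

def is_true(value) -> bool:
    """True for boolean True and for 'yes', 'on', 'true', '1' (any case)."""
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("yes", "on", "true", "1")


def is_false(value) -> bool:
    """True for boolean False and for 'no', 'off', 'false', '0' (any case)."""
    if isinstance(value, bool):
        return not value
    return str(value).strip().lower() in ("no", "off", "false", "0")
```

Note that the empty string is neither true nor false under these semantics, which is why hunks such as the `enable_or_skip_script` change in PATCH 21 pair the calls with explicit `value != ''` checks.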
From fd34624c168fab7050380b6addd9f5ecb14edf72 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Mon, 24 Feb 2025 15:26:26 +0000
Subject: [PATCH 33/40] Support mlperf inference submission tar file generation (#267)

* Fix the mlperf inference submission command to include only the submission sub_dirs

---
 .../meta.yaml | 2 ++
 script/tar-my-folder/customize.py | 22 +++++++++++++++----
 script/tar-my-folder/meta.yaml | 1 +
 3 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml
index d5971b146..6c5a8d417 100644
--- a/script/run-mlperf-inference-submission-checker/meta.yaml
+++ b/script/run-mlperf-inference-submission-checker/meta.yaml
@@ -80,6 +80,8 @@ post_deps:
     tags: run,tar
     env:
       MLC_TAR_INPUT_DIR: <<>>
+      MLC_TAR_SUB_FOLDERS_TO_INCLUDE: closed,open,network
+      COPYFILE_DISABLE: 1
 - enable_if_env:
     MLC_MLPERF_SUBMITTER_ID:
     - 'yes'
diff --git a/script/tar-my-folder/customize.py b/script/tar-my-folder/customize.py
index 9013a3431..a8407ff2c 100644
--- a/script/tar-my-folder/customize.py
+++ b/script/tar-my-folder/customize.py
@@ -21,10 +21,24 @@ def preprocess(i):
     env['MLC_TAR_OUTFILE'] = output_file
     from pathlib import Path
     input_path = Path(input_dir)
-    cd_dir = input_path.parent.absolute()
-    CMD = 'tar --directory ' + \
-        str(cd_dir) + ' -czf ' + os.path.join(output_dir,
-                                              output_file) + ' ' + input_dirname
+    sub_folders_to_include = env.get('MLC_TAR_SUB_FOLDERS_TO_INCLUDE', '')
+    if sub_folders_to_include != '':
+        cd_dir = input_path.absolute()
+        r = sub_folders_to_include.split(",")
+        v_sub_folders = []
+        for sub_folder in r:
+            f = sub_folder.strip()
+            if os.path.exists(os.path.join(input_path, f)):
+                v_sub_folders.append(f)
+        CMD = 'tar --directory ' + \
+            str(cd_dir) + ' -czf ' + os.path.join(output_dir,
+                                                  output_file) + ' ' + ' '.join(v_sub_folders)
+    else:
+        cd_dir = input_path.parent.absolute()
+        CMD = 'tar --directory ' + \
+            str(cd_dir) + ' -czf ' + os.path.join(output_dir,
+                                                  output_file) + ' ' + input_dirname
+
     print(CMD)
     ret = os.system(CMD)
     print("Tar file " + os.path.join(output_dir, output_file) + " created")
diff --git a/script/tar-my-folder/meta.yaml b/script/tar-my-folder/meta.yaml
index 900475310..0a07b105c 100644
--- a/script/tar-my-folder/meta.yaml
+++ b/script/tar-my-folder/meta.yaml
@@ -9,6 +9,7 @@ input_mapping:
   input_dir: MLC_TAR_INPUT_DIR
   outfile: MLC_TAR_OUTFILE
   output_dir: MLC_TAR_OUTPUT_DIR
+  sub_folders_to_include: MLC_TAR_SUB_FOLDERS_TO_INCLUDE
 new_env_keys:
 - MLC_TAR_OUTFILE
 tags:
From b964b0b412e4dcf629689b4479756be25e5205fd Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Tue, 25 Feb 2025 00:36:32 +0530
Subject: [PATCH 34/40] convert relative to abs file path (#270)

---
 script/submit-mlperf-results/customize.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/script/submit-mlperf-results/customize.py b/script/submit-mlperf-results/customize.py
index 0936ec560..ae5824af2 100644
--- a/script/submit-mlperf-results/customize.py
+++ b/script/submit-mlperf-results/customize.py
@@ -17,6 +17,14 @@ def preprocess(i):
     file_path = env['MLC_MLPERF_SUBMISSION_FILE']
     submitter_name = env.get('MLC_MLPERF_SUBMITTER', '')
 
+    # check the file_path is absolute or relative
+    # if it is relative, convert to absolute
+    if not os.path.isabs(file_path):
+        file_path = os.path.abspath(
+            os.path.join(
+                env['MLC_TMP_CURRENT_PATH'],
+                file_path))
+
     r = get_signed_url(
         server,
         benchmark,
From 231a21931042556aa4ecb61e80717e46e10948f9 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Tue, 25 Feb 2025 10:49:01 +0000
Subject: [PATCH 35/40] Cleanup for run-mobilenet script (#272)

---
 .github/workflows/test-scc24-sdxl.yaml | 3 +-
 .../customize.py | 49 +++++++++----------
 2 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml
index ffe814ba4..3151c8183 100644
--- a/.github/workflows/test-scc24-sdxl.yaml
+++ b/.github/workflows/test-scc24-sdxl.yaml
@@ -1,8 +1,9 @@
 name: MLPerf inference SDXL (SCC)
+
 on:
   schedule:
-    - cron: "34 19 * * *"
+    - cron: "34 19 1 * *"
 
 jobs:
   build_reference:
diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py
index 2588aca8e..fb3238ca2 100644
--- a/script/run-mlperf-inference-mobilenet-models/customize.py
+++ b/script/run-mlperf-inference-mobilenet-models/customize.py
@@ -2,6 +2,8 @@
 import os
 import sys
 from utils import *
+import mlc
+import importlib
 
 
 def preprocess(i):
@@ -17,7 +19,9 @@ def preprocess(i):
     adr = i['input'].get('adr')
 
     automation = i['automation']
-    mlc = i['automation'].action_object
+    # mlc = i['automation'].action_object
+    # cache_action = i['automation'].cache_action
+    cache_action = mlc
 
     quiet = (env.get('MLC_QUIET', False) == 'yes')
     verbose = (env.get('MLC_VERBOSE', False) == 'yes')
@@ -61,7 +65,7 @@ def preprocess(i):
         models['mobilenet']['v3'] = models_all['mobilenet']['v3']
     elif is_true(env.get('MLC_MLPERF_RUN_MOBILENETS', '')):
         models['mobilenet'] = models_all['mobilenet']
-    elif is_true(env.get('MLC_MLPERF_RUN_EFFICIENTNETS', '')):
+    if is_true(env.get('MLC_MLPERF_RUN_EFFICIENTNETS', '')):
         models['efficientnet'] = models_all['efficientnet']
 
     variation_strings = {}
@@ -107,17 +111,17 @@ def preprocess(i):
         execution_mode = "test"
 
     precisions = []
-    if env.get('MLC_MLPERF_RUN_FP32', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_RUN_FP32', '')):
         precisions.append("fp32")
-    if env.get('MLC_MLPERF_RUN_INT8', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_RUN_INT8', '')):
         precisions.append("uint8")
 
     implementation_tags = []
-    if env.get('MLC_MLPERF_USE_ARMNN_LIBRARY', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_USE_ARMNN_LIBRARY', '')):
         implementation_tags.append("_armnn")
-    if env.get('MLC_MLPERF_TFLITE_ARMNN_NEON', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_TFLITE_ARMNN_NEON', '')):
         implementation_tags.append("_use-neon")
-    if env.get('MLC_MLPERF_TFLITE_ARMNN_OPENCL', '') == "yes":
+    if is_true(env.get('MLC_MLPERF_TFLITE_ARMNN_OPENCL', '')):
         implementation_tags.append("_use-opencl")
     implementation_tags_string = ",".join(implementation_tags)
 
@@ -185,34 +189,27 @@ def preprocess(i):
     if is_true(env.get('MLC_MLPERF_POWER', '')):
         mlc_input['power'] = 'yes'
 
-    if is_true(env.get('MLC_MLPERF_ACCURACY_MODE', '')):
-        mlc_input['mode'] = 'accuracy'
-        print(mlc_input)
-        r = mlc.access(mlc_input)
-        if r['return'] > 0:
-            return r
-
-    if is_true(env.get('MLC_MLPERF_PERFORMANCE_MODE', '')):
-        mlc_input['mode'] = 'performance'
-
-        print(mlc_input)
-        r = mlc.access(mlc_input)
-        if r['return'] > 0:
-            return r
-
-    if env.get('MLC_TEST_ONE_RUN', '') == "yes":
-        return {'return': 0}
+    print(mlc_input)
+    r = mlc.access(mlc_input)
+    if r['return'] > 0:
+        return r
+    importlib.reload(mlc.action)
 
     if is_true(env.get('MLC_MINIMIZE_DISK_SPACE', '')):
-        r = mlc.access(clean_input)
+        r = cache_action.access(clean_input)
         if r['return'] > 0:
             print(r)
             # return r
 
-    r = mlc.access(clean_input)
+    if is_true(env.get('MLC_TEST_ONE_RUN', '')):
+        return {'return': 0}
+
+    r = cache_action.access(clean_input)
     if r['return'] > 0:
         print(r)
         # return r
+    else:
+        importlib.reload(mlc.action)
 
     return {'return': 0}
From 72fbb7a50e24fcba28a9b137aa06d62dc53928ec Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Tue, 25 Feb 2025 15:02:22 +0000
Subject: [PATCH 36/40] Added command to untar waymo dataset files (#274)

* Added command to untar waymo dataset files
* Clean run-mobilenet-models code

---
 script/get-dataset-waymo/run-rclone.sh | 5 ++++-
 script/run-mlperf-inference-mobilenet-models/customize.py | 2 ++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/script/get-dataset-waymo/run-rclone.sh b/script/get-dataset-waymo/run-rclone.sh
index caca3ff98..12aac3377 100644
--- a/script/get-dataset-waymo/run-rclone.sh
+++ b/script/get-dataset-waymo/run-rclone.sh
@@ -1,4 +1,7 @@
 cmd="rclone sync mlc-waymo:waymo_preprocessed_dataset/kitti_format ${MLC_DATASET_WAYMO_PATH} -P"
 echo $cmd
 eval $cmd
-test $? -eq 0 || exit $?
\ No newline at end of file
+test $? -eq 0 || exit $?
+cd ${MLC_DATASET_WAYMO_PATH}/kitti_format/training
+for f in *.tar.gz; do tar -xzvf "$f"; done
+cd -
diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py
index fb3238ca2..9601af651 100644
--- a/script/run-mlperf-inference-mobilenet-models/customize.py
+++ b/script/run-mlperf-inference-mobilenet-models/customize.py
@@ -200,6 +200,8 @@ def preprocess(i):
         if r['return'] > 0:
             print(r)
             # return r
+        else:
+            importlib.reload(mlc.action)
 
     if is_true(env.get('MLC_TEST_ONE_RUN', '')):
         return {'return': 0}
From 7ec1c439371fc2c0b1cb5cc9a23d7b9973417ed9 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Wed, 26 Feb 2025 13:14:42 +0000
Subject: [PATCH 37/40] Support min_duration (#277)

* Lower the prebuilt version for llvm
* Revert "Lower the prebuilt version for llvm"
This reverts commit b5c9fa7a1472421611777fc113e62a8c346471f1.
* support more tf versions in build from src
* Create power-server.conf
* Fix the run command for power runs
* Support min_duration

---
 script/benchmark-program-mlperf/customize.py | 4 +-
 .../customize.py | 10 ++--
 script/install-tensorflow-from-src/meta.yaml | 48 +++++++++++++++++++
 script/run-mlperf-inference-app/meta.yaml | 1 +
 .../run-mlperf-power-server/power-server.conf | 19 ++++++++
 5 files changed, 75 insertions(+), 7 deletions(-)
 create mode 100644 script/run-mlperf-power-server/power-server.conf

diff --git a/script/benchmark-program-mlperf/customize.py b/script/benchmark-program-mlperf/customize.py
index 3f92511eb..493505c48 100644
--- a/script/benchmark-program-mlperf/customize.py
+++ b/script/benchmark-program-mlperf/customize.py
@@ -33,9 +33,9 @@ def postprocess(i):
 echo \${MLC_MLPERF_RUN_COUNT} > \${MLC_RUN_DIR}/count.txt;
 
 if [ \${MLC_MLPERF_RUN_COUNT} -eq 1 ]; then
-export MLC_MLPERF_USER_CONF="${MLC_MLPERF_RANGING_USER_CONF}";
+export MLC_MLPERF_USER_CONF="\${MLC_MLPERF_RANGING_USER_CONF}";
 else
-export MLC_MLPERF_USER_CONF="${MLC_MLPERF_TESTING_USER_CONF}";
+export MLC_MLPERF_USER_CONF="\${MLC_MLPERF_TESTING_USER_CONF}";
 fi ;
 
diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py
index ec1a5fab7..a9f8e3eaa 100644
--- a/script/generate-mlperf-inference-user-conf/customize.py
+++ b/script/generate-mlperf-inference-user-conf/customize.py
@@ -356,13 +356,13 @@ def preprocess(i):
             max_duration_ranging_s * 1000)  # in milliseconds
 
     if scenario == "MultiStream" or scenario == "SingleStream":
-        if env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get(
-                'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]:
-            user_conf += ml_model_name + "." + scenario + \
-                f".max_duration = {max_duration_valid}" + "\n"
-        elif env.get('MLC_MLPERF_INFERENCE_MIN_DURATION', '') != '':
+        if env.get('MLC_MLPERF_INFERENCE_MIN_DURATION', '') != '':
             user_conf += ml_model_name + "." + scenario + ".min_duration = " + \
                 env['MLC_MLPERF_INFERENCE_MIN_DURATION'] + " \n"
+        elif not is_false(env.get('MLC_MLPERF_USE_MAX_DURATION', 'yes')) and not is_true(env.get(
+                'MLC_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no')):
+            user_conf += ml_model_name + "." + scenario + \
+                f".max_duration = {max_duration_valid}" + "\n"
         if scenario == "MultiStream":
             user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \
                 env.get(
diff --git a/script/install-tensorflow-from-src/meta.yaml b/script/install-tensorflow-from-src/meta.yaml
index 30821bb38..c219d1179 100644
--- a/script/install-tensorflow-from-src/meta.yaml
+++ b/script/install-tensorflow-from-src/meta.yaml
@@ -344,3 +344,51 @@ versions:
       version: 5.0.0
     env:
       MLC_GIT_CHECKOUT: v2.9.0
+  v2.12.0:
+    deps:
+    - names:
+      - python
+      - python3
+      tags: get,python
+      version_max: 3.11.999
+      version_max_usable: 3.11.12
+      version_min: 3.8.0
+    - tags: get,gcc
+      version_max: '12.9'
+      version_min: '9'
+    - tags: get,bazel
+      version: 5.3.0
+    env:
+      MLC_GIT_CHECKOUT: v2.12.0
+  v2.15.0:
+    deps:
+    - names:
+      - python
+      - python3
+      tags: get,python
+      version_max: 3.11.999
+      version_max_usable: 3.11.12
+      version_min: 3.9.0
+    - tags: get,llvm
+      version_max: '16.9.999'
+      version_min: '16.0.0'
+    - tags: get,bazel
+      version: 6.1.0
+    env:
+      MLC_GIT_CHECKOUT: v2.15.0
+  v2.18.0:
+    deps:
+    - names:
+      - python
+      - python3
+      tags: get,python
+      version_max: 3.12.999
+      version_max_usable: 3.12.12
+      version_min: 3.9.0
+    - tags: get,llvm
+      version_max: '17.9.999'
+      version_min: '17.0.6'
+    - tags: get,bazel
+      version: 6.5.0
+    env:
+      MLC_GIT_CHECKOUT: v2.18.0
diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml
index bf8b741a1..b19362ecc 100644
--- a/script/run-mlperf-inference-app/meta.yaml
+++ b/script/run-mlperf-inference-app/meta.yaml
@@ -67,6 +67,7 @@ input_mapping:
   imagenet_path: IMAGENET_PATH
   implementation: MLC_MLPERF_IMPLEMENTATION
   lang: MLC_MLPERF_IMPLEMENTATION
+  min_duration: MLC_MLPERF_INFERENCE_MIN_DURATION
   min_query_count: MLC_MLPERF_INFERENCE_MIN_QUERY_COUNT
   max_query_count: MLC_MLPERF_INFERENCE_MAX_QUERY_COUNT
   mode: MLC_MLPERF_LOADGEN_MODE
diff --git a/script/run-mlperf-power-server/power-server.conf b/script/run-mlperf-power-server/power-server.conf
new file mode 100644
index 000000000..70797bd62
--- /dev/null
+++ b/script/run-mlperf-power-server/power-server.conf
@@ -0,0 +1,19 @@
+[server]
+ntpserver = time.google.com
+listen = 0.0.0.0 4950
+
+[ptd]
+ptd = C:\Users\arjun\CM\repos\local\cache\5a0a52d578724774\repo\PTD\binaries\ptd-windows-x86.exe
+analyzerCount = 2
+
+[analyzer1]
+interfaceflag = -y
+devicetype = 49
+deviceport = C3YD21068E
+networkport = 8888
+
+[analyzer2]
+interfaceflag = -g
+devicetype = 8
+deviceport = 20
+networkport = 8889
From 301de963ac151c9f128f19f7de8ac8552a475d09 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 27 Feb 2025 01:09:51 +0000
Subject: [PATCH 38/40] Cleanup mobilenet runs (#279)

* Added only_fp32 variation for compressed input mobilenet runs
* Don't clean dataset for every mobilenet run
* Update armnn version

---
 script/get-lib-armnn/meta.yaml | 2 +-
 script/run-all-mlperf-models/run-mobilenet-models.sh | 6 ++++--
 script/run-mlperf-inference-mobilenet-models/customize.py | 4 +++-
 script/run-mlperf-inference-mobilenet-models/meta.yaml | 3 ++-
 4 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/script/get-lib-armnn/meta.yaml b/script/get-lib-armnn/meta.yaml
index 6c33e97ed..ff71bf95a 100644
--- a/script/get-lib-armnn/meta.yaml
+++ b/script/get-lib-armnn/meta.yaml
@@ -3,7 +3,7 @@ automation_alias: script
 automation_uid: 5b4e0237da074764
 cache: true
 category: Detection or installation of tools and artifacts
-default_version: '23.11'
+default_version: '25.02'
 deps:
 - tags: detect,os
 env:
diff --git a/script/run-all-mlperf-models/run-mobilenet-models.sh b/script/run-all-mlperf-models/run-mobilenet-models.sh
index 9ad2bc7cd..ad24ed87d 100644
--- a/script/run-all-mlperf-models/run-mobilenet-models.sh
+++ b/script/run-all-mlperf-models/run-mobilenet-models.sh
@@ -24,7 +24,8 @@ function run() {
 }
 POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4940 "
 POWER=""
-extra_option=""
+#extra_option=" --minimize_disk_usage=yes"
+extra_option=" --minimize_disk_usage=no"
 extra_tags=""
 #extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"
 #extra_tags=",_only-fp32"
@@ -41,7 +42,8 @@ run "mlcr run,mobilenet-models,_tflite,_armnn,_neon$extra_tags \
 ${extra_option} "
 
 
-extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"
+extra_option=" $extra_option --adr.mlperf-inference-implementation.compressed_dataset=on"
+extra_tags=",_only-fp32"
 run "mlcr run,mobilenet-models,_tflite$extra_tags \
 --adr.compiler.tags=gcc \
 ${extra_option} "
diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py
index 9601af651..a7e3d5ee0 100644
--- a/script/run-mlperf-inference-mobilenet-models/customize.py
+++ b/script/run-mlperf-inference-mobilenet-models/customize.py
@@ -195,7 +195,7 @@ def preprocess(i):
             return r
         importlib.reload(mlc.action)
 
-    if is_true(env.get('MLC_MINIMIZE_DISK_SPACE', '')):
+    if is_true(env.get('MLC_MINIMIZE_DISK_USAGE', '')):
         r = cache_action.access(clean_input)
         if r['return'] > 0:
             print(r)
@@ -206,12 +206,14 @@ def preprocess(i):
     if is_true(env.get('MLC_TEST_ONE_RUN', '')):
         return {'return': 0}
 
+    '''
     r = cache_action.access(clean_input)
     if r['return'] > 0:
         print(r)
         # return r
     else:
         importlib.reload(mlc.action)
+    '''
 
     return {'return': 0}
diff --git a/script/run-mlperf-inference-mobilenet-models/meta.yaml b/script/run-mlperf-inference-mobilenet-models/meta.yaml
index a6a42760a..813b1ef93 100644
--- a/script/run-mlperf-inference-mobilenet-models/meta.yaml
+++ b/script/run-mlperf-inference-mobilenet-models/meta.yaml
@@ -9,7 +9,7 @@ default_env:
   MLC_MLPERF_RUN_INT8: 'yes'
   MLC_MLPERF_RUN_MOBILENETS: 'no'
   MLC_USE_DATASET_FROM_HOST: 'yes'
-  MLC_MINIMIZE_DISK_SPACE: 'yes'
+  MLC_MINIMIZE_DISK_USAGE: 'no'
 deps:
 - tags: get,sys-utils-mlc
 docker:
From c8cb2c378fcc84d44fe20a81ef24956bc93dffc0 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 27 Feb 2025 07:53:16 +0000
Subject: [PATCH 39/40] Update classification.cpp (#280)

---
 .../armnn/classification.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
index c7a07faa8..ff10fb074 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
@@ -422,12 +422,6 @@ void TestSingleStream(Program *prg) {
                               ? mlperf::TestMode::FindPeakPerformance
                               : mlperf::TestMode::SubmissionRun;
 
-  if (ts.FromConfig(mlperf_conf_path, model_name, scenario_string)) {
-    std::cout << "Issue with mlperf.conf file at " << mlperf_conf_path
-              << std::endl;
-    exit(1);
-  }
-
   if (ts.FromConfig(user_conf_path, model_name, scenario_string)) {
     std::cout << "Issue with user.conf file at " << user_conf_path << std::endl;
     exit(1);
From 3cb1135361635d29f7e1e73ae6d93f625cf908c2 Mon Sep 17 00:00:00 2001
From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com>
Date: Sun, 2 Mar 2025 08:00:11 +0530
Subject: [PATCH 40/40] fix duplication of automation object (#282)

---
 automation/script/module.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/automation/script/module.py b/automation/script/module.py
index cadf35608..acdacff8e 100644
--- a/automation/script/module.py
+++ b/automation/script/module.py
@@ -1610,6 +1610,7 @@ def _run(self, i):
             ii['env'] = env
             ii['state'] = state
             ii['meta'] = meta
+            ii['automation'] = self
             # may need to detect versions in multiple paths
             ii['run_script_input'] = run_script_input
@@ -1762,6 +1763,7 @@ def _run(self, i):
                 ii['env'] = env
                 ii['state'] = state
                 ii['meta'] = meta
+                ii['automation'] = self
                 # may need to detect versions in multiple paths
                 ii['run_script_input'] = run_script_input
@@ -5518,6 +5520,7 @@ def run_detect_version(customize_code, customize_common_input,
     ii['env'] = env
     ii['state'] = state
     ii['meta'] = meta
+    ii['automation'] = customize_common_input['automation']
 
     r = customize_code.detect_version(ii)
 
     return r
@@ -5548,6 +5551,7 @@ def run_postprocess(customize_code, customize_common_input, recursion_spaces,
     ii['env'] = env
     ii['state'] = state
     ii['meta'] = meta
+    ii['automation'] = customize_common_input['automation']
 
     if run_script_input is not None:
         ii['run_script_input'] = run_script_input
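
With PATCH 40 applied, the `automation` object travels with the customize input everywhere: `preprocess` already received it (PATCH 26 reads `i['automation'].action_object`), and `detect_version` and `postprocess` now get it too. A minimal customize.py skeleton against that interface; the keys are the ones set in the hunks above, while the body is purely illustrative:

```python
# Sketch of a script customize.py under the interface these patches
# assume; only keys visible in the diffs above are used.

def preprocess(i):
    env = i['env']                # script environment dictionary
    state = i['state']            # shared run state
    automation = i['automation']  # script automation object (PATCH 40)

    # action_object exposes mlc actions, as used in
    # run-mlperf-inference-mobilenet-models/customize.py above
    mlc = automation.action_object

    if env.get('MLC_TMP_FAKE_DEPS', '') == 'yes':
        return {'return': 0}      # nothing to do on a fake run

    return {'return': 0}


def postprocess(i):
    env = i['env']
    automation = i['automation']  # available here too after PATCH 40
    return {'return': 0}
```

A non-zero `return` value accompanied by an `error` key is how these hooks report failure, as in the waymo-calibration script added in PATCH 30.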